Import platform/external/python/parse_type am: 81e1c61c10 am: 86fdec7c7d

Change-Id: I5f4f3f4d8e6c6de36f5728510eb2826bba70e3b1
diff --git a/.bumpversion.cfg b/.bumpversion.cfg
new file mode 100644
index 0000000..b340bf2
--- /dev/null
+++ b/.bumpversion.cfg
@@ -0,0 +1,7 @@
+[bumpversion]
+current_version = 0.5.3
+files = setup.py parse_type/__init__.py .bumpversion.cfg pytest.ini
+commit = False
+tag = False
+allow_dirty = True
+
diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000..c45d482
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,46 @@
+# =========================================================================
+# COVERAGE CONFIGURATION FILE: .coveragerc
+# =========================================================================
+# LANGUAGE: Python
+# SEE ALSO:
+#  * http://nedbatchelder.com/code/coverage/
+#  * http://nedbatchelder.com/code/coverage/config.html
+# =========================================================================
+
+[run]
+# data_file = .coverage
+source   = parse_type
+branch   = True
+parallel = True
+omit = mock.py, ez_setup.py, distribute.py
+
+
+[report]
+ignore_errors = True
+show_missing  = True
+# Regexes for lines to exclude from consideration
+exclude_lines =
+    # Have to re-enable the standard pragma
+    pragma: no cover
+
+    # Don't complain about missing debug-only code:
+    def __repr__
+    if self\.debug
+
+    # Don't complain if tests don't hit defensive assertion code:
+    raise AssertionError
+    raise NotImplementedError
+
+    # Don't complain if non-runnable code isn't run:
+    if 0:
+    if False:
+    if __name__ == .__main__.:
+
+
+[html]
+directory = build/coverage.html
+title = Coverage Report: parse_type
+
+[xml]
+output = build/coverage.xml
+
diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 0000000..d262338
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,26 @@
+# =============================================================================
+# EDITOR CONFIGURATION: http://editorconfig.org
+# =============================================================================
+
+root = true
+
+# -- DEFAULT: Unix-style newlines with a newline ending every file.
+[*]
+charset = utf-8
+end_of_line = lf
+insert_final_newline = true
+trim_trailing_whitespace = true
+
+[*.{py,rst,ini,txt}]
+indent_style = space
+indent_size = 4
+
+[*.feature]
+indent_style = space
+indent_size = 2
+
+[**/makefile]
+indent_style = tab
+
+[*.{cmd,bat}]
+end_of_line = crlf
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..40edeae
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,33 @@
+*.py[cod]
+
+# Packages
+MANIFEST
+*.egg
+*.egg-info
+dist
+build
+downloads
+__pycache__
+
+# Installer logs
+pip-log.txt
+
+Pipfile
+Pipfile.lock
+
+# Unit test / coverage reports
+.cache/
+.eggs/
+.pytest_cache/
+.tox/
+.venv*/
+.coverage
+
+# -- IDE-RELATED:
+.idea/
+.vscode/
+.project
+.pydevproject
+
+# -- EXCLUDE GIT-SUBPROJECTS:
+/lib/parse/
diff --git a/.rosinstall b/.rosinstall
new file mode 100644
index 0000000..c13f7c1
--- /dev/null
+++ b/.rosinstall
@@ -0,0 +1,6 @@
+# GIT MULTI-REPO TOOL: wstool
+# REQUIRES: wstool >= 0.1.17 (better: 0.1.18; not in pypi yet)
+- git:
+    local-name: lib/parse
+    uri: https://github.com/r1chardj0n3s/parse
+    version: master
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..66b50f6
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,20 @@
+language: python
+sudo: false
+python:
+  - "3.7"
+  - "2.7"
+  - "3.8-dev"
+  - "pypy"
+  - "pypy3"
+
+# -- TEST-BALLOON: Allow failures for pre-release/nightly Python versions.
+matrix:
+  allow_failures:
+    - python: "3.8-dev"
+    - python: "nightly"
+
+install:
+  - pip install -U -r py.requirements/ci.travis.txt
+  - python setup.py -q install
+script:
+  - pytest tests
diff --git a/CHANGES.txt b/CHANGES.txt
new file mode 100644
index 0000000..b732fc8
--- /dev/null
+++ b/CHANGES.txt
@@ -0,0 +1,15 @@
+Version History
+===============================================================================
+
+Version: 0.4.3 (2018-04-xx, unreleased)
+-------------------------------------------------------------------------------
+
+CHANGES:
+
+* UPDATE: parse-1.8.3 (was: parse-1.8.2)
+  NOTE: ``parse`` module and ``parse_type.parse`` module are now identical.
+
+BACKWARD INCOMPATIBLE CHANGES:
+
+* RENAMED: type_converter.regex_group_count attribute (was: .group_count)
+  (pull-request review changes of the ``parse`` module).
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..2c758e6
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013-2019, jenisys
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+  Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+  Redistributions in binary form must reproduce the above copyright notice, this
+  list of conditions and the following disclaimer in the documentation and/or
+  other materials provided with the distribution.
+
+  Neither the name of the {organization} nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..afa925d
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,20 @@
+include README.rst
+include LICENSE
+include .coveragerc
+include .editorconfig
+include *.py
+include *.rst
+include *.txt
+include *.ini
+include *.cfg
+include *.yaml
+include bin/invoke*
+
+recursive-include bin           *.sh *.py *.cmd
+recursive-include py.requirements  *.txt
+recursive-include tasks         *.py *.txt *.rst *.zip
+recursive-include tests         *.py
+# -- DISABLED: recursive-include docs          *.rst *.txt *.py
+
+prune .tox
+prune .venv*
diff --git a/METADATA b/METADATA
new file mode 100644
index 0000000..b687a96
--- /dev/null
+++ b/METADATA
@@ -0,0 +1,19 @@
+name: "parse_type"
+description:
+    "parse_type extends the parse module and provide type converters, "
+    "cardinality constraint and an extended parser that supports the "
+    "CardinalityField naming schema."
+
+third_party {
+  url {
+    type: HOMEPAGE
+    value: "https://github.com/jenisys/parse_type"
+  }
+  url {
+    type: GIT
+    value: "https://github.com/jenisys/parse_type.git"
+  }
+  version: "0.5.2"
+  last_upgrade_date { year: 2020 month: 4 day: 22 }
+  license_type: NOTICE
+}
diff --git a/MODULE_LICENSE_BSD b/MODULE_LICENSE_BSD
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/MODULE_LICENSE_BSD
diff --git a/OWNERS b/OWNERS
new file mode 100644
index 0000000..e035a90
--- /dev/null
+++ b/OWNERS
@@ -0,0 +1,5 @@
+# Default owners are top 3 or more active developers of the past 1 or 2 years
+# or people with more than 10 commits last year.
+# Please update this list if you find better owner candidates.
+jimtang@google.com
+yangbill@google.com
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..7781245
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,277 @@
+===============================================================================
+parse_type
+===============================================================================
+
+.. image:: https://img.shields.io/travis/jenisys/parse_type/master.svg
+    :target: https://travis-ci.org/jenisys/parse_type
+    :alt: Travis CI Build Status
+
+.. image:: https://img.shields.io/pypi/v/parse_type.svg
+    :target: https://pypi.python.org/pypi/parse_type
+    :alt: Latest Version
+
+.. image:: https://img.shields.io/pypi/dm/parse_type.svg
+    :target: https://pypi.python.org/pypi/parse_type
+    :alt: Downloads
+
+.. image:: https://img.shields.io/pypi/l/parse_type.svg
+    :target: https://pypi.python.org/pypi/parse_type/
+    :alt: License
+
+
+`parse_type`_ extends the `parse`_ module (opposite of `string.format()`_)
+with the following features:
+
+* build type converters for common use cases (enum/mapping, choice)
+* build a type converter with a cardinality constraint (0..1, 0..*, 1..*)
+    from the type converter with cardinality=1.
+* compose a type converter from other type converters
+* an extended parser that supports the CardinalityField naming schema
+    and creates missing type variants (0..1, 0..*, 1..*) from the
+    primary type converter
+
+.. _parse_type: http://pypi.python.org/pypi/parse_type
+.. _parse:      http://pypi.python.org/pypi/parse
+.. _`string.format()`: http://docs.python.org/library/string.html#format-string-syntax
+
+
+Definitions
+-------------------------------------------------------------------------------
+
+*type converter*
+    A type converter function that converts a textual representation
+    of a value type into instance of this value type.
+    In addition, a type converter function is often annotated with attributes
+    that allow the `parse`_ module to use it in a generic way.
+    A type converter is also called a *parse_type* (a definition used here).
+
+*cardinality field*
+    A naming convention for related types that differ in cardinality.
+    A cardinality field is a type name suffix in the format of a field.
+    It allows parse format expression, ala::
+
+        "{person:Person}"     #< Cardinality: 1    (one; the normal case)
+        "{person:Person?}"    #< Cardinality: 0..1 (zero or one  = optional)
+        "{persons:Person*}"   #< Cardinality: 0..* (zero or more = many0)
+        "{persons:Person+}"   #< Cardinality: 1..* (one  or more = many)
+
+    This naming convention mimics the relationship descriptions in UML diagrams.
+
+
+Basic Example
+-------------------------------------------------------------------------------
+
+Define an own type converter for numbers (integers):
+
+.. code-block:: python
+
+    # -- USE CASE:
+    def parse_number(text):
+        return int(text)
+    parse_number.pattern = r"\d+"  # -- REGULAR EXPRESSION pattern for type.
+
+This is equivalent to:
+
+.. code-block:: python
+
+    import parse
+
+    @parse.with_pattern(r"\d+")
+    def parse_number(text):
+         return int(text)
+    assert hasattr(parse_number, "pattern")
+    assert parse_number.pattern == r"\d+"
+
+
+.. code-block:: python
+
+    # -- USE CASE: Use the type converter with the parse module.
+    schema = "Hello {number:Number}"
+    parser = parse.Parser(schema, dict(Number=parse_number))
+    result = parser.parse("Hello 42")
+    assert result is not None, "REQUIRE: text matches the schema."
+    assert result["number"] == 42
+
+    result = parser.parse("Hello XXX")
+    assert result is None, "MISMATCH: text does not match the schema."
+
+.. hint::
+
+    The described functionality above is standard functionality
+    of the `parse`_ module. It serves as introduction for the remaining cases.
+
+
+Cardinality
+-------------------------------------------------------------------------------
+
+Create a type converter for "ManyNumbers" (List, separated with commas)
+with cardinality "1..* = 1+" (many) from the type converter for a "Number".
+
+.. code-block:: python
+
+    # -- USE CASE: Create new type converter with a cardinality constraint.
+    # CARDINALITY: many := one or more (1..*)
+    from parse import Parser
+    from parse_type import TypeBuilder
+    parse_numbers = TypeBuilder.with_many(parse_number, listsep=",")
+
+    schema = "List: {numbers:ManyNumbers}"
+    parser = Parser(schema, dict(ManyNumbers=parse_numbers))
+    result = parser.parse("List: 1, 2, 3")
+    assert result["numbers"] == [1, 2, 3]
+
+
+Create a type converter for an "OptionalNumber" with cardinality "0..1 = ?"
+(optional) from the type converter for a "Number".
+
+.. code-block:: python
+
+    # -- USE CASE: Create new type converter with cardinality constraint.
+    # CARDINALITY: optional := zero or one (0..1)
+    from parse import Parser
+    from parse_type import TypeBuilder
+
+    parse_optional_number = TypeBuilder.with_optional(parse_number)
+    schema = "Optional: {number:OptionalNumber}"
+    parser = Parser(schema, dict(OptionalNumber=parse_optional_number))
+    result = parser.parse("Optional: 42")
+    assert result["number"] == 42
+    result = parser.parse("Optional: ")
+    assert result["number"] == None
+
+
+Enumeration (Name-to-Value Mapping)
+-------------------------------------------------------------------------------
+
+Create a type converter for an "Enumeration" from the description of
+the mapping as dictionary.
+
+.. code-block:: python
+
+    # -- USE CASE: Create a type converter for an enumeration.
+    from parse import Parser
+    from parse_type import TypeBuilder
+
+    parse_enum_yesno = TypeBuilder.make_enum({"yes": True, "no": False})
+    parser = Parser("Answer: {answer:YesNo}", dict(YesNo=parse_enum_yesno))
+    result = parser.parse("Answer: yes")
+    assert result["answer"] == True
+
+
+Create a type converter for an "Enumeration" from the description of
+the mapping as an enumeration class (`Python 3.4 enum`_ or the `enum34`_
+backport; see also: `PEP-0435`_).
+
+.. code-block:: python
+
+    # -- USE CASE: Create a type converter for enum34 enumeration class.
+    # NOTE: Use Python 3.4 or enum34 backport.
+    from parse import Parser
+    from parse_type import TypeBuilder
+    from enum import Enum
+
+    class Color(Enum):
+        red   = 1
+        green = 2
+        blue  = 3
+
+    parse_enum_color = TypeBuilder.make_enum(Color)
+    parser = Parser("Select: {color:Color}", dict(Color=parse_enum_color))
+    result = parser.parse("Select: red")
+    assert result["color"] is Color.red
+
+.. _`Python 3.4 enum`: http://docs.python.org/3.4/library/enum.html#module-enum
+.. _enum34:   http://pypi.python.org/pypi/enum34
+.. _PEP-0435: http://www.python.org/dev/peps/pep-0435
+
+
+Choice (Name Enumeration)
+-------------------------------------------------------------------------------
+
+A Choice data type allows to select one of several strings.
+
+Create a type converter for a "Choice" list, a list of unique names
+(as string).
+
+.. code-block:: python
+
+    from parse import Parser
+    from parse_type import TypeBuilder
+
+    parse_choice_yesno = TypeBuilder.make_choice(["yes", "no"])
+    schema = "Answer: {answer:ChoiceYesNo}"
+    parser = Parser(schema, dict(ChoiceYesNo=parse_choice_yesno))
+    result = parser.parse("Answer: yes")
+    assert result["answer"] == "yes"
+
+
+Variant (Type Alternatives)
+-------------------------------------------------------------------------------
+
+Sometimes you need a type converter that can accept text for multiple
+type converter alternatives. This is normally called a "variant" (or: union).
+
+Create a type converter for a "Variant" type that accepts:
+
+* Numbers (positive numbers, as integer)
+* Color enum values (by name)
+
+.. code-block:: python
+
+    from parse import Parser, with_pattern
+    from parse_type import TypeBuilder
+    from enum import Enum
+
+    class Color(Enum):
+        red   = 1
+        green = 2
+        blue  = 3
+
+    @with_pattern(r"\d+")
+    def parse_number(text):
+        return int(text)
+
+    # -- MAKE VARIANT: Alternatives of different type converters.
+    parse_color = TypeBuilder.make_enum(Color)
+    parse_variant = TypeBuilder.make_variant([parse_number, parse_color])
+    schema = "Variant: {variant:Number_or_Color}"
+    parser = Parser(schema, dict(Number_or_Color=parse_variant))
+
+    # -- TEST VARIANT: With number, color and mismatch.
+    result = parser.parse("Variant: 42")
+    assert result["variant"] == 42
+    result = parser.parse("Variant: blue")
+    assert result["variant"] is Color.blue
+    result = parser.parse("Variant: __MISMATCH__")
+    assert not result
+
+
+
+Extended Parser with CardinalityField support
+-------------------------------------------------------------------------------
+
+The parser extends the ``parse.Parser`` and adds the following functionality:
+
+* supports the CardinalityField naming scheme
+* automatically creates missing type variants for types with
+  a CardinalityField by using the primary type converter for cardinality=1
+* extends the provided type converter dictionary with new type variants.
+
+Example:
+
+.. code-block:: python
+
+    # -- USE CASE: Parser with CardinalityField support.
+    # NOTE: Automatically adds missing type variants with CardinalityField part.
+    # USE:  parse_number() type converter from above.
+    from parse_type.cfparse import Parser
+
+    # -- PREPARE: parser, adds missing type variant for cardinality 1..* (many)
+    type_dict = dict(Number=parse_number)
+    schema = "List: {numbers:Number+}"
+    parser = Parser(schema, type_dict)
+    assert "Number+" in type_dict, "Created missing type variant based on: Number"
+
+    # -- USE: parser.
+    result = parser.parse("List: 1, 2, 3")
+    assert result["numbers"] == [1, 2, 3]
diff --git a/bin/invoke b/bin/invoke
new file mode 100755
index 0000000..e9800e8
--- /dev/null
+++ b/bin/invoke
@@ -0,0 +1,8 @@
+#!/bin/sh
+#!/bin/bash
+# RUN INVOKE: From bundled ZIP file.
+
+HERE=$(dirname $0)
+export INVOKE_TASKS_USE_VENDOR_BUNDLES="yes"
+
+python ${HERE}/../tasks/_vendor/invoke.zip $*
diff --git a/bin/invoke.cmd b/bin/invoke.cmd
new file mode 100644
index 0000000..9303432
--- /dev/null
+++ b/bin/invoke.cmd
@@ -0,0 +1,9 @@
+@echo off
+REM RUN INVOKE: From bundled ZIP file.
+
+setlocal
+set HERE=%~dp0
+set INVOKE_TASKS_USE_VENDOR_BUNDLES="yes"
+if not defined PYTHON   set PYTHON=python
+
+%PYTHON% %HERE%../tasks/_vendor/invoke.zip "%*"
diff --git a/bin/make_localpi.py b/bin/make_localpi.py
new file mode 100755
index 0000000..7661bb6
--- /dev/null
+++ b/bin/make_localpi.py
@@ -0,0 +1,244 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+Utility script to create a pypi-like directory structure (localpi)
+from a number of Python packages in a directory of the local filesystem.
+
+  DIRECTORY STRUCTURE (before):
+      +-- downloads/
+           +-- alice-1.0.zip
+           +-- alice-1.0.tar.gz
+           +-- bob-1.3.0.tar.gz
+           +-- bob-1.4.2.tar.gz
+           +-- charly-1.0.tar.bz2
+
+  DIRECTORY STRUCTURE (afterwards):
+      +-- downloads/
+           +-- simple/
+           |      +-- alice/index.html   --> ../../alice-*.*
+           |      +-- bob/index.html     --> ../../bob-*.*
+           |      +-- charly/index.html  --> ../../charly-*.*
+           |      +-- index.html  --> alice/, bob/, ...
+           +-- alice-1.0.zip
+           +-- alice-1.0.tar.gz
+           +-- bob-1.3.0.tar.gz
+           +-- bob-1.4.2.tar.gz
+           +-- charly-1.0.tar.bz2
+
+USAGE EXAMPLE:
+
+    mkdir -p /tmp/downloads
+    pip install --download=/tmp/downloads argparse Jinja2
+    make_localpi.py /tmp/downloads
+    pip install --index-url=file:///tmp/downloads/simple argparse Jinja2
+
+ALTERNATIVE:
+
+    pip install --download=/tmp/downloads argparse Jinja2
+    pip install --find-links=/tmp/downloads --no-index argparse Jinja2
+"""
+
+from __future__ import with_statement, print_function
+from fnmatch import fnmatch
+import os.path
+import shutil
+import sys
+
+
+__author__  = "Jens Engel"
+__version__ = "0.2"
+__license__ = "BSD"
+__copyright__ = "(c) 2013 by Jens Engel"
+
+
+class Package(object):
+    """
+    Package entity that keeps track of:
+      * one or more versions of this package
+      * one or more archive types
+    """
+    PATTERNS = [
+        "*.egg", "*.exe", "*.whl", "*.zip", "*.tar.gz", "*.tar.bz2", "*.7z"
+    ]
+
+    def __init__(self, filename, name=None):
+        if not name and filename:
+            name = self.get_pkgname(filename)
+        self.name  = name
+        self.files = []
+        if filename:
+            self.files.append(filename)
+
+    @property
+    def versions(self):
+        versions_info = [ self.get_pkgversion(p) for p in self.files ]
+        return versions_info
+
+    @classmethod
+    def get_pkgversion(cls, filename):
+        parts = os.path.basename(filename).rsplit("-", 1)
+        version = ""
+        if len(parts) >= 2:
+            version = parts[1]
+        for pattern in cls.PATTERNS:
+            assert pattern.startswith("*")
+            suffix = pattern[1:]
+            if version.endswith(suffix):
+                version = version[:-len(suffix)]
+                break
+        return version
+
+    @staticmethod
+    def get_pkgname(filename):
+        name = os.path.basename(filename).rsplit("-", 1)[0]
+        if name.startswith("http%3A") or name.startswith("https%3A"):
+            # -- PIP DOWNLOAD-CACHE PACKAGE FILE NAME SCHEMA:
+            pos = name.rfind("%2F")
+            name = name[pos+3:]
+        return name
+
+    @staticmethod
+    def splitext(filename):
+        fname = os.path.splitext(filename)[0]
+        if fname.endswith(".tar"):
+            fname = os.path.splitext(fname)[0]
+        return fname
+
+    @classmethod
+    def isa(cls, filename):
+        basename = os.path.basename(filename)
+        if basename.startswith("."):
+            return False
+        for pattern in cls.PATTERNS:
+            if fnmatch(filename, pattern):
+                return True
+        return False
+
+
+def make_index_for(package, index_dir, verbose=True):
+    """
+    Create an 'index.html' for one package.
+
+    :param package:   Package object to use.
+    :param index_dir: Where 'index.html' should be created.
+    """
+    index_template = """\
+<html>
+<head><title>{title}</title></head>
+<body>
+<h1>{title}</h1>
+<ul>
+{packages}
+</ul>
+</body>
+</html>
+"""
+    item_template = '<li><a href="{1}">{0}</a></li>'
+    index_filename = os.path.join(index_dir, "index.html")
+    if not os.path.isdir(index_dir):
+        os.makedirs(index_dir)
+
+    parts = []
+    for pkg_filename in package.files:
+        pkg_name = os.path.basename(pkg_filename)
+        if pkg_name == "index.html":
+            # -- ROOT-INDEX:
+            pkg_name = os.path.basename(os.path.dirname(pkg_filename))
+        else:
+            pkg_name = package.splitext(pkg_name)
+        pkg_relpath_to = os.path.relpath(pkg_filename, index_dir)
+        parts.append(item_template.format(pkg_name, pkg_relpath_to))
+
+    if not parts:
+        print("OOPS: Package %s has no files" % package.name)
+        return
+
+    if verbose:
+        root_index = not Package.isa(package.files[0])
+        if root_index:
+            info = "with %d package(s)" % len(package.files)
+        else:
+            package_versions = sorted(set(package.versions))
+            info = ", ".join(reversed(package_versions))
+        message = "%-30s  %s" % (package.name, info)
+        print(message)
+
+    with open(index_filename, "w") as f:
+        packages = "\n".join(parts)
+        text = index_template.format(title=package.name, packages=packages)
+        f.write(text.strip())
+        f.close()
+
+
+def make_package_index(download_dir):
+    """
+    Create a pypi server like file structure below download directory.
+
+    :param download_dir:    Download directory with packages.
+
+    EXAMPLE BEFORE:
+      +-- downloads/
+           +-- alice-1.0.zip
+           +-- alice-1.0.tar.gz
+           +-- bob-1.3.0.tar.gz
+           +-- bob-1.4.2.tar.gz
+           +-- charly-1.0.tar.bz2
+
+    EXAMPLE AFTERWARDS:
+      +-- downloads/
+           +-- simple/
+           |      +-- alice/index.html   --> ../../alice-*.*
+           |      +-- bob/index.html     --> ../../bob-*.*
+           |      +-- charly/index.html  --> ../../charly-*.*
+           |      +-- index.html  --> alice/index.html, bob/index.html, ...
+           +-- alice-1.0.zip
+           +-- alice-1.0.tar.gz
+           +-- bob-1.3.0.tar.gz
+           +-- bob-1.4.2.tar.gz
+           +-- charly-1.0.tar.bz2
+    """
+    if not os.path.isdir(download_dir):
+        raise ValueError("No such directory: %r" % download_dir)
+
+    pkg_rootdir = os.path.join(download_dir, "simple")
+    if os.path.isdir(pkg_rootdir):
+        shutil.rmtree(pkg_rootdir, ignore_errors=True)
+    os.mkdir(pkg_rootdir)
+
+    # -- STEP: Collect all packages.
+    package_map = {}
+    packages = []
+    for filename in sorted(os.listdir(download_dir)):
+        if not Package.isa(filename):
+            continue
+        pkg_filepath = os.path.join(download_dir, filename)
+        package_name = Package.get_pkgname(pkg_filepath)
+        package = package_map.get(package_name, None)
+        if not package:
+            # -- NEW PACKAGE DETECTED: Store/register package.
+            package = Package(pkg_filepath)
+            package_map[package.name] = package
+            packages.append(package)
+        else:
+            # -- SAME PACKAGE: Collect other variant/version.
+            package.files.append(pkg_filepath)
+
+    # -- STEP: Make local PYTHON PACKAGE INDEX.
+    root_package = Package(None, "Python Package Index")
+    root_package.files = [ os.path.join(pkg_rootdir, pkg.name, "index.html")
+                           for pkg in packages ]
+    make_index_for(root_package, pkg_rootdir)
+    for package in packages:
+        index_dir = os.path.join(pkg_rootdir, package.name)
+        make_index_for(package, index_dir)
+
+
+# -----------------------------------------------------------------------------
+# MAIN:
+# -----------------------------------------------------------------------------
+if __name__ == "__main__":
+    if (len(sys.argv) != 2) or "-h" in sys.argv[1:] or "--help" in sys.argv[1:]:
+        print("USAGE: %s DOWNLOAD_DIR" % os.path.basename(sys.argv[0]))
+        print(__doc__)
+        sys.exit(1)
+    make_package_index(sys.argv[1])
diff --git a/bin/project_bootstrap.sh b/bin/project_bootstrap.sh
new file mode 100755
index 0000000..76d5af8
--- /dev/null
+++ b/bin/project_bootstrap.sh
@@ -0,0 +1,21 @@
+#!/bin/sh
+# =============================================================================
+# BOOTSTRAP PROJECT: Download all requirements
+# =============================================================================
+# test ${PIP_DOWNLOADS_DIR} || mkdir -p ${PIP_DOWNLOADS_DIR}
+# tox -e init
+
+set -e
+
+# -- CONFIGURATION:
+HERE=`dirname $0`
+TOP="${HERE}/.."
+: ${PIP_INDEX_URL="http://pypi.python.org/simple"}
+: ${PIP_DOWNLOAD_DIR:="${TOP}/downloads"}
+export PIP_INDEX_URL PIP_DOWNLOADS_DIR
+
+# -- EXECUTE STEPS:
+${HERE}/toxcmd.py mkdir ${PIP_DOWNLOAD_DIR}
+pip install --download=${PIP_DOWNLOAD_DIR} -r ${TOP}/requirements/all.txt
+${HERE}/make_localpi.py ${PIP_DOWNLOAD_DIR}
+
diff --git a/bin/toxcmd.py b/bin/toxcmd.py
new file mode 100755
index 0000000..38bb6d5
--- /dev/null
+++ b/bin/toxcmd.py
@@ -0,0 +1,252 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+"""
+Provides a command container for additional tox commands, used in "tox.ini".
+
+COMMANDS:
+
+  * copytree
+  * copy
+  * py2to3
+
+REQUIRES:
+  * argparse
+"""
+
+from glob import glob
+import argparse
+import inspect
+import os.path
+import shutil
+import sys
+
+__author__ = "Jens Engel"
+__copyright__ = "(c) 2013 by Jens Engel"
+__license__ = "BSD"
+
+# -----------------------------------------------------------------------------
+# CONSTANTS:
+# -----------------------------------------------------------------------------
+VERSION = "0.1.0"
+FORMATTER_CLASS = argparse.RawDescriptionHelpFormatter
+
+
+# -----------------------------------------------------------------------------
+# SUBCOMMAND: copytree
+# -----------------------------------------------------------------------------
+def command_copytree(args):
+    """
+    Copy one or more source directory(s) below a destination directory.
+    Parts of the destination directory path are created if needed.
+    Similar to the UNIX command: 'cp -R srcdir destdir'
+    """
+    for srcdir in args.srcdirs:
+        basename = os.path.basename(srcdir)
+        destdir2 = os.path.normpath(os.path.join(args.destdir, basename))
+        if os.path.exists(destdir2):
+            shutil.rmtree(destdir2)
+        sys.stdout.write("copytree: %s => %s\n" % (srcdir, destdir2))
+        shutil.copytree(srcdir, destdir2)
+    return 0
+
+
+def setup_parser_copytree(parser):
+    parser.add_argument("srcdirs", nargs="+", help="Source directory(s)")
+    parser.add_argument("destdir", help="Destination directory")
+
+
+command_copytree.usage = "%(prog)s srcdir... destdir"
+command_copytree.short = "Copy source dir(s) below a destination directory."
+command_copytree.setup_parser = setup_parser_copytree
+
+
+# -----------------------------------------------------------------------------
+# SUBCOMMAND: copy
+# -----------------------------------------------------------------------------
+def command_copy(args):
+    """
+    Copy one or more source-files(s) to a destpath (destfile or destdir).
+    Destdir mode is used if:
+      * More than one srcfile is provided
+      * Last parameter ends with a slash ("/").
+      * Last parameter is an existing directory
+
+    Destination directory path is created if needed.
+    Similar to the UNIX command: 'cp srcfile... destpath'
+    """
+    sources = args.sources
+    destpath = args.destpath
+    source_files = []
+    for file_ in sources:
+        if "*" in file_:
+            selected = glob(file_)
+            source_files.extend(selected)
+        elif os.path.isfile(file_):
+            source_files.append(file_)
+
+    if destpath.endswith("/") or os.path.isdir(destpath) or len(sources) > 1:
+        # -- DESTDIR-MODE: Last argument is a directory.
+        destdir = destpath
+    else:
+        # -- DESTFILE-MODE: Copy (and rename) one file.
+        assert len(source_files) == 1
+        destdir = os.path.dirname(destpath)
+
+    # -- WORK-HORSE: Copy one or more files to destpath.
+    if not os.path.isdir(destdir):
+        sys.stdout.write("copy: Create dir %s\n" % destdir)
+        os.makedirs(destdir)
+    for source in source_files:
+        destname = os.path.join(destdir, os.path.basename(source))
+        sys.stdout.write("copy: %s => %s\n" % (source, destname))
+        shutil.copy(source, destname)
+    return 0
+
+
+def setup_parser_copy(parser):
+    parser.add_argument("sources", nargs="+", help="Source files.")
+    parser.add_argument("destpath", help="Destination path")
+
+
+command_copy.usage = "%(prog)s sources... destpath"
+command_copy.short = "Copy one or more source files to a destinition."
+command_copy.setup_parser = setup_parser_copy
+
+
+# -----------------------------------------------------------------------------
+# SUBCOMMAND: mkdir
+# -----------------------------------------------------------------------------
+def command_mkdir(args):
+    """
+    Create a non-existing directory (or more ...).
+    If the directory exists, the step is skipped.
+    Similar to the UNIX command: 'mkdir -p dir'
+    """
+    errors = 0
+    for directory in args.dirs:
+        if os.path.exists(directory):
+            if not os.path.isdir(directory):
+                # -- SANITY CHECK: directory exists, but as file...
+                sys.stdout.write("mkdir: %s\n" % directory)
+                sys.stdout.write("ERROR: Exists already, but as file...\n")
+                errors += 1
+        else:
+            # -- NORMAL CASE: Directory does not exist yet.
+            assert not os.path.isdir(directory)
+            sys.stdout.write("mkdir: %s\n" % directory)
+            os.makedirs(directory)
+    return errors
+
+
+def setup_parser_mkdir(parser):
+    parser.add_argument("dirs", nargs="+", help="Directory(s)")  # one or more target directories
+
+command_mkdir.usage = "%(prog)s dir..."
+command_mkdir.short = "Create non-existing directory (or more...)."
+command_mkdir.setup_parser = setup_parser_mkdir
+
+# -----------------------------------------------------------------------------
+# SUBCOMMAND: py2to3
+# -----------------------------------------------------------------------------
+def command_py2to3(args):
+    """
+    Apply '2to3' tool (Python2 to Python3 conversion tool) to Python sources.
+    """
+    from lib2to3.main import main
+    sys.exit(main("lib2to3.fixes", args=args.sources))  # "lib2to3.fixes" selects the stdlib fixer package
+
+
+def setup_parser4py2to3(parser):
+    parser.add_argument("sources", nargs="+", help="Source files.")
+
+
+command_py2to3.name = "2to3"
+command_py2to3.usage = "%(prog)s sources..."
+command_py2to3.short = "Apply python's 2to3 tool to Python sources."
+command_py2to3.setup_parser = setup_parser4py2to3
+
+
+# -----------------------------------------------------------------------------
+# COMMAND HELPERS/UTILS:
+# -----------------------------------------------------------------------------
+def discover_commands():
+    commands = []
+    for name, func in inspect.getmembers(inspect.getmodule(toxcmd_main)):  # scan this module's globals
+        if name.startswith("__"):
+            continue
+        if name.startswith("command_") and callable(func):
+            command_name0 = name.replace("command_", "")
+            command_name = getattr(func, "name", command_name0)  # allows override, e.g. command_py2to3 -> "2to3"
+            commands.append(Command(command_name, func))
+    return commands
+
+
+class Command(object):
+    def __init__(self, name, func):
+        assert isinstance(name, basestring)  # FIXME: basestring is Python2-only (NameError on Python3)
+        assert callable(func)
+        self.name = name
+        self.func = func
+        self.parser = None
+
+    def setup_parser(self, command_parser):
+        setup_parser = getattr(self.func, "setup_parser", None)
+        if setup_parser and callable(setup_parser):
+            setup_parser(command_parser)
+        else:
+            command_parser.add_argument("args", nargs="*")  # -- FALLBACK: accept any positional args
+
+    @property
+    def usage(self):
+        usage = getattr(self.func, "usage", None)
+        return usage
+
+    @property
+    def short_description(self):
+        short_description = getattr(self.func, "short", "")
+        return short_description
+
+    @property
+    def description(self):
+        return inspect.getdoc(self.func)
+
+    def __call__(self, args):
+        return self.func(args)
+
+
+# -----------------------------------------------------------------------------
+# MAIN-COMMAND:
+# -----------------------------------------------------------------------------
+def toxcmd_main(args=None):
+    """Command util with subcommands for tox environments."""
+    usage = "USAGE: %(prog)s [OPTIONS] COMMAND args..."  # NOTE: unused local variable
+    if args is None:
+        args = sys.argv[1:]
+
+    # -- STEP: Build command-line parser.
+    parser = argparse.ArgumentParser(description=inspect.getdoc(toxcmd_main),
+                                     formatter_class=FORMATTER_CLASS)
+    common_parser = parser.add_argument_group("Common options")
+    common_parser.add_argument("--version", action="version", version=VERSION)
+    subparsers = parser.add_subparsers(help="commands")  # NOTE(review): subcommands are optional on Python3 argparse -- confirm
+    for command in discover_commands():
+        command_parser = subparsers.add_parser(command.name,
+                                               usage=command.usage,
+                                               description=command.description,
+                                               help=command.short_description,
+                                               formatter_class=FORMATTER_CLASS)
+        command_parser.set_defaults(func=command)
+        command.setup_parser(command_parser)
+        command.parser = command_parser
+
+    # -- STEP: Process command-line and run command.
+    options = parser.parse_args(args)
+    command_function = options.func  # AttributeError here if no subcommand was given (Python3)
+    return command_function(options)
+
+
+# -----------------------------------------------------------------------------
+# MAIN:
+# -----------------------------------------------------------------------------
+if __name__ == "__main__":
+    sys.exit(toxcmd_main())
diff --git a/invoke.yaml b/invoke.yaml
new file mode 100644
index 0000000..23b756c
--- /dev/null
+++ b/invoke.yaml
@@ -0,0 +1,30 @@
+# =====================================================
+# INVOKE CONFIGURATION: parse_type
+# =====================================================
+# -- ON WINDOWS:
+# run:
+#   echo: true
+#   pty:  false
+#   shell: C:\Windows\System32\cmd.exe
+# =====================================================
+
+project:
+    name: parse_type
+    repo: "pypi"
+    # -- TODO: until upload problems are resolved.
+    repo_url: "https://upload.pypi.org/legacy/"
+
+tasks:
+    auto_dash_names: false
+
+run:
+    echo: true
+    # DISABLED: pty:  true
+
+cleanup_all:
+    extra_directories:
+      - build
+      - dist
+      - .hypothesis
+      - .pytest_cache
+
diff --git a/parse_type/Android.bp b/parse_type/Android.bp
new file mode 100644
index 0000000..4acc48c
--- /dev/null
+++ b/parse_type/Android.bp
@@ -0,0 +1,30 @@
+// Copyright 2020 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+python_library {
+    name: "py-parse_type",
+    host_supported: true,
+    srcs: [
+        "*.py",
+    ],
+    version: {
+        py2: {
+            enabled: true,
+        },
+        py3: {
+            enabled: true,
+        },
+    },
+}
+
diff --git a/parse_type/__init__.py b/parse_type/__init__.py
new file mode 100644
index 0000000..76d2f3f
--- /dev/null
+++ b/parse_type/__init__.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+"""
+This module extends the :mod:`parse` to build and derive additional
+parse-types from other, existing types.
+"""
+
+from __future__ import absolute_import
+from parse_type.cardinality import Cardinality
+from parse_type.builder import TypeBuilder, build_type_dict
+
+__all__ = ["Cardinality", "TypeBuilder", "build_type_dict"]
+__version__ = "0.5.3"
+
+# -----------------------------------------------------------------------------
+# Copyright (c) 2012-2019 by Jens Engel (https://github.com/jenisys/)
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+#  The above copyright notice and this permission notice shall be included in
+#  all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
diff --git a/parse_type/builder.py b/parse_type/builder.py
new file mode 100644
index 0000000..4bde1c8
--- /dev/null
+++ b/parse_type/builder.py
@@ -0,0 +1,312 @@
+# -*- coding: utf-8 -*-
+# pylint: disable=missing-docstring
+r"""
+Provides support to compose user-defined parse types.
+
+Cardinality
+------------
+
+It is often useful to constrain how often a data type occurs.
+This is also called the cardinality of a data type (in a context).
+The supported cardinality are:
+
+  * 0..1  zero_or_one,  optional<T>: T or None
+  * 0..N  zero_or_more, list_of<T>
+  * 1..N  one_or_more,  list_of<T> (many)
+
+
+.. doctest:: cardinality
+
+    >>> from parse_type import TypeBuilder
+    >>> from parse import Parser
+
+    >>> def parse_number(text):
+    ...     return int(text)
+    >>> parse_number.pattern = r"\d+"
+
+    >>> parse_many_numbers = TypeBuilder.with_many(parse_number)
+    >>> more_types = { "Numbers": parse_many_numbers }
+    >>> parser = Parser("List: {numbers:Numbers}", more_types)
+    >>> parser.parse("List: 1, 2, 3")
+    <Result () {'numbers': [1, 2, 3]}>
+
+
+Enumeration Type (Name-to-Value Mappings)
+-----------------------------------------
+
+An Enumeration data type allows to select one of several enum values by using
+its name. The converter function returns the selected enum value.
+
+.. doctest:: make_enum
+
+    >>> parse_enum_yesno = TypeBuilder.make_enum({"yes": True, "no": False})
+    >>> more_types = { "YesNo": parse_enum_yesno }
+    >>> parser = Parser("Answer: {answer:YesNo}", more_types)
+    >>> parser.parse("Answer: yes")
+    <Result () {'answer': True}>
+
+
+Choice (Name Enumerations)
+-----------------------------
+
+A Choice data type allows to select one of several strings.
+
+.. doctest:: make_choice
+
+    >>> parse_choice_yesno = TypeBuilder.make_choice(["yes", "no"])
+    >>> more_types = { "ChoiceYesNo": parse_choice_yesno }
+    >>> parser = Parser("Answer: {answer:ChoiceYesNo}", more_types)
+    >>> parser.parse("Answer: yes")
+    <Result () {'answer': 'yes'}>
+
+"""
+
+from __future__ import absolute_import
+import inspect
+import re
+import enum
+from parse_type.cardinality import pattern_group_count, \
+    Cardinality, TypeBuilder as CardinalityTypeBuilder
+
+__all__ = ["TypeBuilder", "build_type_dict", "parse_anything"]
+
+
+class TypeBuilder(CardinalityTypeBuilder):
+    """
+    Provides a utility class to build type-converters (parse_types) for
+    the :mod:`parse` module.
+    """
+    default_strict = True
+    default_re_opts = (re.IGNORECASE | re.DOTALL)
+
+    @classmethod
+    def make_list(cls, item_converter=None, listsep=','):
+        """
+        Create a type converter for a list of items (many := 1..*).
+        The parser accepts anything and the converter needs to fail on errors.
+
+        :param item_converter:  Type converter for an item.
+        :param listsep:  List separator to use (as string).
+        :return: Type converter function object for the list.
+        """
+        if not item_converter:
+            item_converter = parse_anything
+        return cls.with_cardinality(Cardinality.many, item_converter,
+                                    pattern=cls.anything_pattern,
+                                    listsep=listsep)
+
+    @staticmethod
+    def make_enum(enum_mappings):
+        """
+        Creates a type converter for an enumeration or text-to-value mapping.
+
+        :param enum_mappings: Defines enumeration names and values.
+        :return: Type converter function object for the enum/mapping.
+        """
+        if (inspect.isclass(enum_mappings) and
+                issubclass(enum_mappings, enum.Enum)):
+            enum_class = enum_mappings
+            enum_mappings = enum_class.__members__
+
+        def convert_enum(text):
+            if text not in convert_enum.mappings:
+                text = text.lower()     # REQUIRED-BY: parse re.IGNORECASE
+            return convert_enum.mappings[text]    #< raises KeyError if name is still unknown
+        convert_enum.pattern = r"|".join(enum_mappings.keys())
+        convert_enum.mappings = enum_mappings
+        return convert_enum
+
+    @staticmethod
+    def _normalize_choices(choices, transform):
+        assert transform is None or callable(transform)
+        if transform:
+            choices = [transform(value)  for value in choices]
+        else:
+            choices = list(choices)
+        return choices
+
+    @classmethod
+    def make_choice(cls, choices, transform=None, strict=None):
+        """
+        Creates a type-converter function to select one from a list of strings.
+        The type-converter function returns the selected choice_text.
+        The :param:`transform()` function is applied in the type converter.
+        It can be used to enforce the case (because parser uses re.IGNORECASE).
+
+        :param choices: List of strings as choice.
+        :param transform: Optional, initial transform function for parsed text.
+        :return: Type converter function object for these choices.
+        """
+        # -- NOTE: Parser uses re.IGNORECASE flag
+        #    => transform may enforce case.
+        choices = cls._normalize_choices(choices, transform)
+        if strict is None:
+            strict = cls.default_strict
+
+        def convert_choice(text):
+            if transform:
+                text = transform(text)
+            if strict and text not in convert_choice.choices:
+                values = ", ".join(convert_choice.choices)
+                raise ValueError("%s not in: %s" % (text, values))
+            return text
+        convert_choice.pattern = r"|".join(choices)
+        convert_choice.choices = choices
+        return convert_choice
+
+    @classmethod
+    def make_choice2(cls, choices, transform=None, strict=None):
+        """
+        Creates a type converter to select one item from a list of strings.
+        The type converter function returns a tuple (index, choice_text).
+
+        :param choices: List of strings as choice.
+        :param transform: Optional, initial transform function for parsed text.
+        :return: Type converter function object for these choices.
+        """
+        choices = cls._normalize_choices(choices, transform)
+        if strict is None:
+            strict = cls.default_strict
+
+        def convert_choice2(text):
+            if transform:
+                text = transform(text)
+            if strict and text not in convert_choice2.choices:
+                values = ", ".join(convert_choice2.choices)
+                raise ValueError("%s not in: %s" % (text, values))
+            index = convert_choice2.choices.index(text)
+            return index, text
+        convert_choice2.pattern = r"|".join(choices)
+        convert_choice2.choices = choices
+        return convert_choice2
+
+    @classmethod
+    def make_variant(cls, converters, re_opts=None, compiled=False, strict=True):
+        """
+        Creates a type converter for a number of type converter alternatives.
+        The first matching type converter is used.
+
+        REQUIRES: type_converter.pattern attribute
+
+        :param converters: List of type converters as alternatives.
+        :param re_opts:  Regular expression options to use (=default_re_opts).
+        :param compiled: Use compiled regexp matcher, if true (=False).
+        :param strict:   Enable assertion checks.
+        :return: Type converter function object.
+
+        .. note::
+
+            Works only with named fields in :class:`parse.Parser`.
+            Parser needs group_index delta for unnamed/fixed fields.
+            This is not supported for user-defined types.
+            Otherwise, you need to use :class:`parse_type.parse.Parser`
+            (patched version of the :mod:`parse` module).
+        """
+        # -- NOTE: Uses double-dispatch with regex pattern rematch because
+        #          match is not passed through to primary type converter.
+        assert converters, "REQUIRE: Non-empty list."
+        if len(converters) == 1:
+            return converters[0]
+        if re_opts is None:
+            re_opts = cls.default_re_opts
+
+        pattern = r")|(".join([tc.pattern for tc in converters])
+        pattern = r"("+ pattern + ")"
+        group_count = len(converters)
+        for converter in converters:
+            group_count += pattern_group_count(converter.pattern)
+
+        if compiled:
+            convert_variant = cls.__create_convert_variant_compiled(converters,
+                                                                    re_opts,
+                                                                    strict)
+        else:
+            convert_variant = cls.__create_convert_variant(re_opts, strict)
+        convert_variant.pattern = pattern
+        convert_variant.converters = tuple(converters)
+        convert_variant.regex_group_count = group_count
+        return convert_variant
+
+    @staticmethod
+    def __create_convert_variant(re_opts, strict):
+        # -- USE: Regular expression pattern (compiled on use).
+        def convert_variant(text, m=None):
+            # pylint: disable=invalid-name, unused-argument, missing-docstring
+            for converter in convert_variant.converters:
+                if re.match(converter.pattern, text, re_opts):
+                    return converter(text)
+            # -- pragma: no cover
+            assert not strict, "OOPS-VARIANT-MISMATCH: %s" % text
+            return None
+        return convert_variant
+
+    @staticmethod
+    def __create_convert_variant_compiled(converters, re_opts, strict):
+        # -- USE: Compiled regular expression matcher.
+        for converter in converters:
+            matcher = getattr(converter, "matcher", None)
+            if not matcher:
+                converter.matcher = re.compile(converter.pattern, re_opts)
+
+        def convert_variant(text, m=None):
+            # pylint: disable=invalid-name, unused-argument, missing-docstring
+            for converter in convert_variant.converters:
+                if converter.matcher.match(text):
+                    return converter(text)
+            # -- pragma: no cover
+            assert not strict, "OOPS-VARIANT-MISMATCH: %s" % text
+            return None
+        return convert_variant
+
+
+def build_type_dict(converters):
+    """
+    Builds type dictionary for user-defined type converters,
+    used by :mod:`parse` module.
+    This requires that each type converter has a "name" attribute.
+
+    :param converters: List of type converters (parse_types)
+    :return: Type converter dictionary
+    """
+    more_types = {}
+    for converter in converters:
+        assert callable(converter)
+        more_types[converter.name] = converter  # raises AttributeError if converter lacks a "name" attribute
+    return more_types
+
+# -----------------------------------------------------------------------------
+# COMMON TYPE CONVERTERS
+# -----------------------------------------------------------------------------
+def parse_anything(text, match=None, match_start=0):
+    """
+    Provides a generic type converter that accepts anything and returns
+    the text (unchanged).
+
+    :param text:  Text to convert (as string).
+    :return: Same text (as string).
+    """
+    # pylint: disable=unused-argument
+    return text  # match/match_start accepted but unused -- presumably parse-module hook args; confirm
+parse_anything.pattern = TypeBuilder.anything_pattern  # matches anything (non-greedy ".+?")
+
+
+# -----------------------------------------------------------------------------
+# Copyright (c) 2012-2017 by Jens Engel (https://github.com/jenisys/parse_type)
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+#  The above copyright notice and this permission notice shall be included in
+#  all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
diff --git a/parse_type/cardinality.py b/parse_type/cardinality.py
new file mode 100644
index 0000000..6857767
--- /dev/null
+++ b/parse_type/cardinality.py
@@ -0,0 +1,207 @@
+# -*- coding: utf-8 -*-
+"""
+This module simplifies to build parse types and regular expressions
+for a data type with the specified cardinality.
+"""
+
+# -- USE: enum34
+from __future__ import absolute_import
+from enum import Enum
+
+
+# -----------------------------------------------------------------------------
+# FUNCTIONS:
+# -----------------------------------------------------------------------------
+def pattern_group_count(pattern):
+    """Count the pattern-groups within a regex-pattern (as text)."""
+    return pattern.replace(r"\(", "").count("(")  # NOTE: counts non-capturing "(?:...)" groups, too
+
+
+# -----------------------------------------------------------------------------
+# CLASS: Cardinality (Enum Class)
+# -----------------------------------------------------------------------------
+class Cardinality(Enum):
+    """Cardinality enumeration class to simplify building regular expression
+    patterns for a data type with the specified cardinality.
+    """
+    # pylint: disable=bad-whitespace
+    __order__ = "one, zero_or_one, zero_or_more, one_or_more"
+    one          = (None, 0)
+    zero_or_one  = (r"(%s)?", 1)                 # SCHEMA: pattern
+    zero_or_more = (r"(%s)?(\s*%s\s*(%s))*", 3)  # SCHEMA: pattern sep pattern
+    one_or_more  = (r"(%s)(\s*%s\s*(%s))*",  3)  # SCHEMA: pattern sep pattern
+
+    # -- ALIASES:
+    optional = zero_or_one
+    many0 = zero_or_more
+    many  = one_or_more
+
+    def __init__(self, schema, group_count=0):
+        self.schema = schema
+        self.group_count = group_count  #< Number of match groups.
+
+    def is_many(self):
+        """Checks for a more general interpretation of "many".
+
+        :return: True, if Cardinality.zero_or_more or Cardinality.one_or_more.
+        """
+        return ((self is Cardinality.zero_or_more) or
+                (self is Cardinality.one_or_more))
+
+    def make_pattern(self, pattern, listsep=','):
+        """Make pattern for a data type with the specified cardinality.
+
+        .. code-block:: python
+
+            yes_no_pattern = r"yes|no"
+            many_yes_no = Cardinality.one_or_more.make_pattern(yes_no_pattern)
+
+        :param pattern:  Regular expression for type (as string).
+        :param listsep:  List separator for multiple items (as string, optional)
+        :return: Regular expression pattern for type with cardinality.
+        """
+        if self is Cardinality.one:
+            return pattern
+        elif self is Cardinality.zero_or_one:
+            return self.schema % pattern
+        # -- OTHERWISE:
+        return self.schema % (pattern, listsep, pattern)
+
+    def compute_group_count(self, pattern):
+        """Compute the number of regexp match groups when the pattern is provided
+        to the :func:`Cardinality.make_pattern()` method.
+
+        :param pattern: Item regexp pattern (as string).
+        :return: Number of regexp match groups in the cardinality pattern.
+        """
+        group_count = self.group_count
+        pattern_repeated = 1
+        if self.is_many():
+            pattern_repeated = 2
+        return group_count + pattern_repeated * pattern_group_count(pattern)
+
+
+# -----------------------------------------------------------------------------
+# CLASS: TypeBuilder
+# -----------------------------------------------------------------------------
+class TypeBuilder(object):
+    """Provides a utility class to build type-converters (parse_types) for parse.
+    It supports to build new type-converters for different cardinality
+    based on the type-converter for cardinality one.
+    """
+    anything_pattern = r".+?"
+    default_pattern = anything_pattern
+
+    @classmethod
+    def with_cardinality(cls, cardinality, converter, pattern=None,
+                         listsep=','):
+        """Creates a type converter for the specified cardinality
+        by using the type converter for T.
+
+        :param cardinality: Cardinality to use (0..1, 0..*, 1..*).
+        :param converter: Type converter (function) for data type T.
+        :param pattern:  Regexp pattern for an item (=converter.pattern).
+        :return: Type-converter for the specified cardinality of T.
+        """
+        if cardinality is Cardinality.one:
+            return converter
+        # -- NORMAL-CASE
+        builder_func = getattr(cls, "with_%s" % cardinality.name)
+        if cardinality is Cardinality.zero_or_one:
+            return builder_func(converter, pattern)
+        # -- MANY CASE: 0..*, 1..*
+        return builder_func(converter, pattern, listsep=listsep)
+
+    @classmethod
+    def with_zero_or_one(cls, converter, pattern=None):
+        """Creates a type converter for a T with 0..1 times
+        by using the type converter for one item of T.
+
+        :param converter: Type converter (function) for data type T.
+        :param pattern:  Regexp pattern for an item (=converter.pattern).
+        :return: type-converter for optional<T> (T or None).
+        """
+        cardinality = Cardinality.zero_or_one
+        if not pattern:
+            pattern = getattr(converter, "pattern", cls.default_pattern)
+        optional_pattern = cardinality.make_pattern(pattern)
+        group_count = cardinality.compute_group_count(pattern)
+
+        def convert_optional(text, m=None):
+            # pylint: disable=invalid-name, unused-argument, missing-docstring
+            if text:
+                text = text.strip()
+            if not text:
+                return None
+            return converter(text)
+        convert_optional.pattern = optional_pattern
+        convert_optional.regex_group_count = group_count
+        return convert_optional
+
+    @classmethod
+    def with_zero_or_more(cls, converter, pattern=None, listsep=","):
+        """Creates a type converter function for a list<T> with 0..N items
+        by using the type converter for one item of T.
+
+        :param converter: Type converter (function) for data type T.
+        :param pattern:  Regexp pattern for an item (=converter.pattern).
+        :param listsep:  Optional list separator between items (default: ',')
+        :return: type-converter for list<T>
+        """
+        cardinality = Cardinality.zero_or_more
+        if not pattern:
+            pattern = getattr(converter, "pattern", cls.default_pattern)
+        many0_pattern = cardinality.make_pattern(pattern, listsep)
+        group_count = cardinality.compute_group_count(pattern)
+
+        def convert_list0(text, m=None):
+            # pylint: disable=invalid-name, unused-argument, missing-docstring
+            if text:
+                text = text.strip()
+            if not text:
+                return []
+            return [converter(part.strip()) for part in text.split(listsep)]
+        convert_list0.pattern = many0_pattern
+        # OLD convert_list0.group_count = group_count
+        convert_list0.regex_group_count = group_count
+        return convert_list0
+
+    @classmethod
+    def with_one_or_more(cls, converter, pattern=None, listsep=","):
+        """Creates a type converter function for a list<T> with 1..N items
+        by using the type converter for one item of T.
+
+        :param converter: Type converter (function) for data type T.
+        :param pattern:  Regexp pattern for an item (=converter.pattern).
+        :param listsep:  Optional list separator between items (default: ',')
+        :return: Type converter for list<T>
+        """
+        cardinality = Cardinality.one_or_more
+        if not pattern:
+            pattern = getattr(converter, "pattern", cls.default_pattern)
+        many_pattern = cardinality.make_pattern(pattern, listsep)
+        group_count = cardinality.compute_group_count(pattern)
+
+        def convert_list(text, m=None):
+            # pylint: disable=invalid-name, unused-argument, missing-docstring
+            return [converter(part.strip()) for part in text.split(listsep)]
+        convert_list.pattern = many_pattern
+        # OLD: convert_list.group_count = group_count
+        convert_list.regex_group_count = group_count
+        return convert_list
+
+    # -- ALIAS METHODS:
+    @classmethod
+    def with_optional(cls, converter, pattern=None):
+        """Alias for :py:meth:`with_zero_or_one()` method."""
+        return cls.with_zero_or_one(converter, pattern)
+
+    @classmethod
+    def with_many(cls, converter, pattern=None, listsep=','):
+        """Alias for :py:meth:`with_one_or_more()` method."""
+        return cls.with_one_or_more(converter, pattern, listsep)
+
+    @classmethod
+    def with_many0(cls, converter, pattern=None, listsep=','):
+        """Alias for :py:meth:`with_zero_or_more()` method."""
+        return cls.with_zero_or_more(converter, pattern, listsep)
diff --git a/parse_type/cardinality_field.py b/parse_type/cardinality_field.py
new file mode 100644
index 0000000..4e892ed
--- /dev/null
+++ b/parse_type/cardinality_field.py
@@ -0,0 +1,171 @@
+# -*- coding: utf-8 -*-
+"""
+Provides support for cardinality fields.
+A cardinality field is a type suffix for parse format expression, ala:
+
+    "{person:Person?}"   #< Cardinality: 0..1 = zero or one  = optional
+    "{persons:Person*}"  #< Cardinality: 0..* = zero or more = many0
+    "{persons:Person+}"  #< Cardinality: 1..* = one  or more = many
+"""
+
+from __future__ import absolute_import
+import six
+from parse_type.cardinality import Cardinality, TypeBuilder
+
+
+class MissingTypeError(KeyError):   # pylint: disable=missing-docstring
+    pass
+
+# -----------------------------------------------------------------------------
+# CLASS: Cardinality (Field Part)
+# -----------------------------------------------------------------------------
+class CardinalityField(object):
+    """Cardinality field for parse format expression, ala:
+
+        "{person:Person?}"   #< Cardinality: 0..1 = zero or one  = optional
+        "{persons:Person*}"  #< Cardinality: 0..* = zero or more = many0
+        "{persons:Person+}"  #< Cardinality: 1..* = one  or more = many
+    """
+
+    # -- MAPPING SUPPORT:
+    pattern_chars = "?*+"
+    from_char_map = {
+        '?': Cardinality.zero_or_one,
+        '*': Cardinality.zero_or_more,
+        '+': Cardinality.one_or_more,
+    }
+    to_char_map = dict([(value, key)  for key, value in from_char_map.items()])
+
+    @classmethod
+    def matches_type(cls, type_name):
+        """Checks if a type name uses the CardinalityField naming scheme.
+
+        :param type_name:  Type name to check (as string).
+        :return: True, if type name has CardinalityField name suffix.
+        """
+        return type_name and type_name[-1] in CardinalityField.pattern_chars
+
+    @classmethod
+    def split_type(cls, type_name):
+        """Split a type name with CardinalityField suffix into its parts.
+
+        :param type_name:  Type name (as string).
+        :return: Tuple (type_basename, cardinality)
+        """
+        if cls.matches_type(type_name):
+            basename = type_name[:-1]
+            cardinality = cls.from_char_map[type_name[-1]]
+        else:
+            # -- ASSUME: Cardinality.one
+            cardinality = Cardinality.one
+            basename = type_name
+        return (basename, cardinality)
+
+    @classmethod
+    def make_type(cls, basename, cardinality):
+        """Build new type name according to CardinalityField naming scheme.
+
+        :param basename:  Type basename of primary type (as string).
+        :param cardinality: Cardinality of the new type (as Cardinality item).
+        :return: Type name with CardinalityField suffix (if needed)
+        """
+        if cardinality is Cardinality.one:
+            # -- POSTCONDITION: assert not cls.make_type(type_name)
+            return basename
+        # -- NORMAL CASE: type with CardinalityField suffix.
+        type_name = "%s%s" % (basename, cls.to_char_map[cardinality])
+        # -- POSTCONDITION: assert cls.make_type(type_name)
+        return type_name
+
+
+# -----------------------------------------------------------------------------
+# CLASS: CardinalityFieldTypeBuilder
+# -----------------------------------------------------------------------------
+class CardinalityFieldTypeBuilder(object):
+    """Utility class to create type converters based on:
+
+      * the CardinalityField naming scheme and
+      * type converter for cardinality=1
+    """
+
+    listsep = ','
+
+    @classmethod
+    def create_type_variant(cls, type_name, type_converter):
+        r"""Create type variants for types with a cardinality field.
+        The new type converters are based on the type converter with
+        cardinality=1.
+
+        .. code-block:: python
+
+            import parse
+
+            @parse.with_pattern(r'\d+')
+            def parse_number(text):
+                return int(text)
+
+            new_type = CardinalityFieldTypeBuilder.create_type_variant(
+                                    "Number+", parse_number)
+            new_type = CardinalityFieldTypeBuilder.create_type_variant(
+                                    "Number+", dict(Number=parse_number))
+
+        :param type_name:  Type name with cardinality field suffix.
+        :param type_converter:  Type converter or type dictionary.
+        :return: Type converter variant (function).
+        :raises: ValueError, if type_name does not end with CardinalityField
+        :raises: MissingTypeError, if type_converter is missing in type_dict
+        """
+        assert isinstance(type_name, six.string_types)
+        if not CardinalityField.matches_type(type_name):
+            message = "type_name='%s' has no CardinalityField" % type_name
+            raise ValueError(message)
+
+        primary_name, cardinality = CardinalityField.split_type(type_name)
+        if isinstance(type_converter, dict):
+            type_dict = type_converter
+            type_converter = type_dict.get(primary_name, None)
+            if not type_converter:
+                raise MissingTypeError(primary_name)
+
+        assert callable(type_converter)
+        type_variant = TypeBuilder.with_cardinality(cardinality,
+                                                    type_converter,
+                                                    listsep=cls.listsep)
+        type_variant.name = type_name
+        return type_variant
+
+
+    @classmethod
+    def create_type_variants(cls, type_names, type_dict):
+        """Create type variants for types with a cardinality field.
+        The new type converters are based on the type converter with
+        cardinality=1.
+
+        .. code-block:: python
+
+            # -- USE: parse_number() type converter function.
+            new_types = CardinalityFieldTypeBuilder.create_type_variants(
+                            ["Number?", "Number+"], dict(Number=parse_number))
+
+        :param type_names: List of type names with cardinality field suffix.
+        :param type_dict:  Type dictionary with named type converters.
+        :return: Type dictionary with type converter variants.
+        """
+        type_variant_dict = {}
+        for type_name in type_names:
+            type_variant = cls.create_type_variant(type_name, type_dict)
+            type_variant_dict[type_name] = type_variant
+        return type_variant_dict
+
+    # MAYBE: Check if really needed.
+    @classmethod
+    def create_missing_type_variants(cls, type_names, type_dict):
+        """Create missing type variants for types with a cardinality field.
+
+        :param type_names: List of type names with cardinality field suffix.
+        :param type_dict:  Type dictionary with named type converters.
+        :return: Type dictionary with missing type converter variants.
+        """
+        missing_type_names = [name for name in type_names
+                              if name not in type_dict]
+        return cls.create_type_variants(missing_type_names, type_dict)
diff --git a/parse_type/cfparse.py b/parse_type/cfparse.py
new file mode 100644
index 0000000..1032a87
--- /dev/null
+++ b/parse_type/cfparse.py
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+"""
+Provides an extended :class:`parse.Parser` class that supports the
+cardinality fields in (user-defined) types.
+"""
+
+from __future__ import absolute_import
+import logging
+import parse
+from .cardinality_field import CardinalityField, CardinalityFieldTypeBuilder
+from .parse_util import FieldParser
+
+
+log = logging.getLogger(__name__)   # pylint: disable=invalid-name
+
+
+class Parser(parse.Parser):
+    """Provides an extended :class:`parse.Parser` with cardinality field support.
+    A cardinality field is a type suffix for parse format expression, ala:
+
+        "... {person:Person?} ..."   -- OPTIONAL: Cardinality zero or one, 0..1
+        "... {persons:Person*} ..."  -- MANY0: Cardinality zero or more, 0..
+        "... {persons:Person+} ..."  -- MANY:  Cardinality one  or more, 1..
+
+    When the primary type converter for cardinality=1 is provided,
+    the type variants for the other cardinality cases can be derived from it.
+
+    This parser class automatically creates missing type variants for types
+    with a cardinality field and passes the extended type dictionary
+    to its base class.
+    """
+    # -- TYPE-BUILDER: For missing types in Fields with CardinalityField part.
+    type_builder = CardinalityFieldTypeBuilder
+
+    def __init__(self, schema, extra_types=None, case_sensitive=False,
+                 type_builder=None):
+        """Creates a parser with CardinalityField part support.
+
+        :param schema:  Parse schema (or format) for parser (as string).
+        :param extra_types:  Type dictionary with type converters (or None).
+        :param case_sensitive: Indicates if case-sensitive regexp are used.
+        :param type_builder: Type builder to use for missing types.
+        """
+        if extra_types is None:
+            extra_types = {}
+        missing = self.create_missing_types(schema, extra_types, type_builder)
+        if missing:
+            # pylint: disable=logging-not-lazy
+            log.debug("MISSING TYPES: %s" % ",".join(missing.keys()))
+            extra_types.update(missing)
+
+        # -- FINALLY: Delegate to base class.
+        super(Parser, self).__init__(schema, extra_types,
+                                     case_sensitive=case_sensitive)
+
+    @classmethod
+    def create_missing_types(cls, schema, type_dict, type_builder=None):
+        """Creates missing types for fields with a CardinalityField part.
+        It is assumed that the primary type converter for cardinality=1
+        is registered in the type dictionary.
+
+        :param schema:  Parse schema (or format) for parser (as string).
+        :param type_dict:  Type dictionary with type converters.
+        :param type_builder: Type builder to use for missing types.
+        :return: Type dictionary with missing types. Empty, if none.
+        :raises: MissingTypeError,
+                if a primary type converter with cardinality=1 is missing.
+        """
+        if not type_builder:
+            type_builder = cls.type_builder
+
+        missing = cls.extract_missing_special_type_names(schema, type_dict)
+        return type_builder.create_type_variants(missing, type_dict)
+
+    @staticmethod
+    def extract_missing_special_type_names(schema, type_dict):
+        # pylint: disable=invalid-name
+        """Extract the type names for fields with CardinalityField part.
+        Selects only the missing type names that are not in the type dictionary.
+
+        :param schema:     Parse schema to use (as string).
+        :param type_dict:  Type dictionary with type converters.
+        :return: Generator with missing type names (as string).
+        """
+        for name in FieldParser.extract_types(schema):
+            if CardinalityField.matches_type(name) and (name not in type_dict):
+                yield name
diff --git a/parse_type/parse.py b/parse_type/parse.py
new file mode 100644
index 0000000..dfc5ce2
--- /dev/null
+++ b/parse_type/parse.py
@@ -0,0 +1,1350 @@
+# -*- coding: UTF-8 -*-
+# BASED-ON: https://github.com/r1chardj0n3s/parse/parse.py
+# VERSION:  parse 1.12.0
+# Same as original parse modules.
+#
+# pylint: disable=line-too-long, invalid-name, too-many-locals, too-many-arguments
+# pylint: disable=redefined-builtin, too-few-public-methods, no-else-return
+# pylint: disable=unused-variable, no-self-use, missing-docstring
+# pylint: disable=unused-argument, unused-variable
+# pylint: disable=too-many-branches, too-many-statements
+# pylint: disable=all
+#
+#  -- ORIGINAL-CODE STARTS-HERE ------------------------------------------------
+r'''Parse strings using a specification based on the Python format() syntax.
+
+   ``parse()`` is the opposite of ``format()``
+
+The module is set up to only export ``parse()``, ``search()``, ``findall()``,
+and ``with_pattern()`` when ``import \*`` is used:
+
+>>> from parse import *
+
+From there it's a simple thing to parse a string:
+
+>>> parse("It's {}, I love it!", "It's spam, I love it!")
+<Result ('spam',) {}>
+>>> _[0]
+'spam'
+
+Or to search a string for some pattern:
+
+>>> search('Age: {:d}\n', 'Name: Rufus\nAge: 42\nColor: red\n')
+<Result (42,) {}>
+
+Or find all the occurrences of some pattern in a string:
+
+>>> ''.join(r.fixed[0] for r in findall(">{}<", "<p>the <b>bold</b> text</p>"))
+'the bold text'
+
+If you're going to use the same pattern to match lots of strings you can
+compile it once:
+
+>>> from parse import compile
+>>> p = compile("It's {}, I love it!")
+>>> print(p)
+<Parser "It's {}, I love it!">
+>>> p.parse("It's spam, I love it!")
+<Result ('spam',) {}>
+
+("compile" is not exported for ``import *`` usage as it would override the
+built-in ``compile()`` function)
+
+The default behaviour is to match strings case insensitively. You may match with
+case by specifying `case_sensitive=True`:
+
+>>> parse('SPAM', 'spam', case_sensitive=True) is None
+True
+
+
+Format Syntax
+-------------
+
+A basic version of the `Format String Syntax`_ is supported with anonymous
+(fixed-position), named and formatted fields::
+
+   {[field name]:[format spec]}
+
+Field names must be valid Python identifiers, including dotted names;
+element indexes imply dictionaries (see below for example).
+
+Numbered fields are also not supported: the result of parsing will include
+the parsed fields in the order they are parsed.
+
+The conversion of fields to types other than strings is done based on the
+type in the format specification, which mirrors the ``format()`` behaviour.
+There are no "!" field conversions like ``format()`` has.
+
+Some simple parse() format string examples:
+
+>>> parse("Bring me a {}", "Bring me a shrubbery")
+<Result ('shrubbery',) {}>
+>>> r = parse("The {} who say {}", "The knights who say Ni!")
+>>> print(r)
+<Result ('knights', 'Ni!') {}>
+>>> print(r.fixed)
+('knights', 'Ni!')
+>>> r = parse("Bring out the holy {item}", "Bring out the holy hand grenade")
+>>> print(r)
+<Result () {'item': 'hand grenade'}>
+>>> print(r.named)
+{'item': 'hand grenade'}
+>>> print(r['item'])
+hand grenade
+>>> 'item' in r
+True
+
+Note that `in` only works if you have named fields. Dotted names and indexes
+are possible though the application must make additional sense of the result:
+
+>>> r = parse("Mmm, {food.type}, I love it!", "Mmm, spam, I love it!")
+>>> print(r)
+<Result () {'food.type': 'spam'}>
+>>> print(r.named)
+{'food.type': 'spam'}
+>>> print(r['food.type'])
+spam
+>>> r = parse("My quest is {quest[name]}", "My quest is to seek the holy grail!")
+>>> print(r)
+<Result () {'quest': {'name': 'to seek the holy grail!'}}>
+>>> print(r['quest'])
+{'name': 'to seek the holy grail!'}
+>>> print(r['quest']['name'])
+to seek the holy grail!
+
+If the text you're matching has braces in it you can match those by including
+a double-brace ``{{`` or ``}}`` in your format string, just like format() does.
+
+
+Format Specification
+--------------------
+
+Most often a straight format-less ``{}`` will suffice where a more complex
+format specification might have been used.
+
+Most of `format()`'s `Format Specification Mini-Language`_ is supported:
+
+   [[fill]align][0][width][.precision][type]
+
+The differences between `parse()` and `format()` are:
+
+- The align operators will cause spaces (or specified fill character) to be
+  stripped from the parsed value. The width is not enforced; it just indicates
+  there may be whitespace or "0"s to strip.
+- Numeric parsing will automatically handle a "0b", "0o" or "0x" prefix.
+  That is, the "#" format character is handled automatically by d, b, o
+  and x formats. For "d" any will be accepted, but for the others the correct
+  prefix must be present if at all.
+- Numeric sign is handled automatically.
+- The thousands separator is handled automatically if the "n" type is used.
+- The types supported are a slightly different mix to the format() types.  Some
+  format() types come directly over: "d", "n", "%", "f", "e", "b", "o" and "x".
+  In addition some regular expression character group types "D", "w", "W", "s"
+  and "S" are also available.
+- The "e" and "g" types are case-insensitive so there is no need for
+  the "E" or "G" types.
+
+===== =========================================== ========
+Type  Characters Matched                          Output
+===== =========================================== ========
+l     Letters (ASCII)                             str
+w     Letters, numbers and underscore             str
+W     Not letters, numbers and underscore         str
+s     Whitespace                                  str
+S     Non-whitespace                              str
+d     Digits (effectively integer numbers)        int
+D     Non-digit                                   str
+n     Numbers with thousands separators (, or .)  int
+%     Percentage (converted to value/100.0)       float
+f     Fixed-point numbers                         float
+F     Decimal numbers                             Decimal
+e     Floating-point numbers with exponent        float
+      e.g. 1.1e-10, NAN (all case insensitive)
+g     General number format (either d, f or e)    float
+b     Binary numbers                              int
+o     Octal numbers                               int
+x     Hexadecimal numbers (lower and upper case)  int
+ti    ISO 8601 format date/time                   datetime
+      e.g. 1972-01-20T10:21:36Z ("T" and "Z"
+      optional)
+te    RFC2822 e-mail format date/time             datetime
+      e.g. Mon, 20 Jan 1972 10:21:36 +1000
+tg    Global (day/month) format date/time         datetime
+      e.g. 20/1/1972 10:21:36 AM +1:00
+ta    US (month/day) format date/time             datetime
+      e.g. 1/20/1972 10:21:36 PM +10:30
+tc    ctime() format date/time                    datetime
+      e.g. Sun Sep 16 01:03:52 1973
+th    HTTP log format date/time                   datetime
+      e.g. 21/Nov/2011:00:07:11 +0000
+ts    Linux system log format date/time           datetime
+      e.g. Nov  9 03:37:44
+tt    Time                                        time
+      e.g. 10:21:36 PM -5:30
+===== =========================================== ========
+
+Some examples of typed parsing with ``None`` returned if the typing
+does not match:
+
+>>> parse('Our {:d} {:w} are...', 'Our 3 weapons are...')
+<Result (3, 'weapons') {}>
+>>> parse('Our {:d} {:w} are...', 'Our three weapons are...')
+>>> parse('Meet at {:tg}', 'Meet at 1/2/2011 11:00 PM')
+<Result (datetime.datetime(2011, 2, 1, 23, 0),) {}>
+
+And messing about with alignment:
+
+>>> parse('with {:>} herring', 'with     a herring')
+<Result ('a',) {}>
+>>> parse('spam {:^} spam', 'spam    lovely     spam')
+<Result ('lovely',) {}>
+
+Note that the "center" alignment does not test to make sure the value is
+centered - it just strips leading and trailing whitespace.
+
+Width and precision may be used to restrict the size of matched text
+from the input. Width specifies a minimum size and precision specifies
+a maximum. For example:
+
+>>> parse('{:.2}{:.2}', 'look')           # specifying precision
+<Result ('lo', 'ok') {}>
+>>> parse('{:4}{:4}', 'look at that')     # specifying width
+<Result ('look', 'at that') {}>
+>>> parse('{:4}{:.4}', 'look at that')    # specifying both
+<Result ('look at ', 'that') {}>
+>>> parse('{:2d}{:2d}', '0440')           # parsing two contiguous numbers
+<Result (4, 40) {}>
+
+Some notes for the date and time types:
+
+- the presence of the time part is optional (including ISO 8601, starting
+  at the "T"). A full datetime object will always be returned; the time
+  will be set to 00:00:00. You may also specify a time without seconds.
+- when a seconds amount is present in the input fractions will be parsed
+  to give microseconds.
+- except in ISO 8601 the day and month digits may be 0-padded.
+- the date separator for the tg and ta formats may be "-" or "/".
+- named months (abbreviations or full names) may be used in the ta and tg
+  formats in place of numeric months.
+- as per RFC 2822 the e-mail format may omit the day (and comma), and the
+  seconds but nothing else.
+- hours greater than 12 will be happily accepted.
+- the AM/PM are optional, and if PM is found then 12 hours will be added
+  to the datetime object's hours amount - even if the hour is greater
+  than 12 (for consistency.)
+- in ISO 8601 the "Z" (UTC) timezone part may be a numeric offset
+- timezones are specified as "+HH:MM" or "-HH:MM". The hour may be one or two
+  digits (0-padded is OK.) Also, the ":" is optional.
+- the timezone is optional in all except the e-mail format (it defaults to
+  UTC.)
+- named timezones are not handled yet.
+
+Note: attempting to match too many datetime fields in a single parse() will
+currently result in a resource allocation issue. A TooManyFields exception
+will be raised in this instance. The current limit is about 15. It is hoped
+that this limit will be removed one day.
+
+.. _`Format String Syntax`:
+  http://docs.python.org/library/string.html#format-string-syntax
+.. _`Format Specification Mini-Language`:
+  http://docs.python.org/library/string.html#format-specification-mini-language
+
+
+Result and Match Objects
+------------------------
+
+The result of a ``parse()`` and ``search()`` operation is either ``None`` (no match), a
+``Result`` instance or a ``Match`` instance if ``evaluate_result`` is False.
+
+The ``Result`` instance has three attributes:
+
+fixed
+   A tuple of the fixed-position, anonymous fields extracted from the input.
+named
+   A dictionary of the named fields extracted from the input.
+spans
+   A dictionary mapping the names and fixed position indices matched to a
+   2-tuple slice range of where the match occurred in the input.
+   The span does not include any stripped padding (alignment or width).
+
+The ``Match`` instance has one method:
+
+evaluate_result()
+   Generates and returns a ``Result`` instance for this ``Match`` object.
+
+
+
+Custom Type Conversions
+-----------------------
+
+If you wish to have matched fields automatically converted to your own type you
+may pass in a dictionary of type conversion information to ``parse()`` and
+``compile()``.
+
+The converter will be passed the field string matched. Whatever it returns
+will be substituted in the ``Result`` instance for that field.
+
+Your custom type conversions may override the builtin types if you supply one
+with the same identifier.
+
+>>> def shouty(string):
+...    return string.upper()
+...
+>>> parse('{:shouty} world', 'hello world', dict(shouty=shouty))
+<Result ('HELLO',) {}>
+
+If the type converter has the optional ``pattern`` attribute, it is used as
+regular expression for better pattern matching (instead of the default one).
+
+>>> def parse_number(text):
+...    return int(text)
+>>> parse_number.pattern = r'\d+'
+>>> parse('Answer: {number:Number}', 'Answer: 42', dict(Number=parse_number))
+<Result () {'number': 42}>
+>>> _ = parse('Answer: {:Number}', 'Answer: Alice', dict(Number=parse_number))
+>>> assert _ is None, "MISMATCH"
+
+You can also use the ``with_pattern(pattern)`` decorator to add this
+information to a type converter function:
+
+>>> from parse import with_pattern
+>>> @with_pattern(r'\d+')
+... def parse_number(text):
+...    return int(text)
+>>> parse('Answer: {number:Number}', 'Answer: 42', dict(Number=parse_number))
+<Result () {'number': 42}>
+
+A more complete example of a custom type might be:
+
+>>> yesno_mapping = {
+...     "yes":  True,   "no":    False,
+...     "on":   True,   "off":   False,
+...     "true": True,   "false": False,
+... }
+>>> @with_pattern(r"|".join(yesno_mapping))
+... def parse_yesno(text):
+...     return yesno_mapping[text.lower()]
+
+
+If the type converter ``pattern`` uses regex-grouping (with parenthesis),
+you should indicate this by using the optional ``regex_group_count`` parameter
+in the ``with_pattern()`` decorator:
+
+>>> @with_pattern(r'((\d+))', regex_group_count=2)
+... def parse_number2(text):
+...    return int(text)
+>>> parse('Answer: {:Number2} {:Number2}', 'Answer: 42 43', dict(Number2=parse_number2))
+<Result (42, 43) {}>
+
+Otherwise, this may cause parsing problems with unnamed/fixed parameters.
+
+
+Potential Gotchas
+-----------------
+
+`parse()` will always match the shortest text necessary (from left to right)
+to fulfil the parse pattern, so for example:
+
+>>> pattern = '{dir1}/{dir2}'
+>>> data = 'root/parent/subdir'
+>>> sorted(parse(pattern, data).named.items())
+[('dir1', 'root'), ('dir2', 'parent/subdir')]
+
+So, even though `{'dir1': 'root/parent', 'dir2': 'subdir'}` would also fit
+the pattern, the actual match represents the shortest successful match for
+`dir1`.
+
+----
+
+**Version history (in brief)**:
+
+- 1.12.1 Actually use the `case_sensitive` arg in compile (thanks @jacquev6)
+- 1.12.0 Do not assume closing brace when an opening one is found (thanks @mattsep)
+- 1.11.1 Revert having unicode char in docstring, it breaks Bamboo builds(?!)
+- 1.11.0 Implement `__contains__` for Result instances.
+- 1.10.0 Introduce a "letters" matcher, since "w" matches numbers
+  also.
+- 1.9.1 Fix deprecation warnings around backslashes in regex strings
+  (thanks Mickael Schoentgen). Also fix some documentation formatting
+  issues.
+- 1.9.0 We now honor precision and width specifiers when parsing numbers
+  and strings, allowing parsing of concatenated elements of fixed width
+  (thanks Julia Signell)
+- 1.8.4 Add LICENSE file at request of packagers.
+  Correct handling of AM/PM to follow most common interpretation.
+  Correct parsing of hexadecimal that looks like a binary prefix.
+  Add ability to parse case sensitively.
+  Add parsing of numbers to Decimal with "F" (thanks John Vandenberg)
+- 1.8.3 Add regex_group_count to with_pattern() decorator to support
+  user-defined types that contain brackets/parenthesis (thanks Jens Engel)
+- 1.8.2 add documentation for including braces in format string
+- 1.8.1 ensure bare hexadecimal digits are not matched
+- 1.8.0 support manual control over result evaluation (thanks Timo Furrer)
+- 1.7.0 parse dict fields (thanks Mark Visser) and adapted to allow
+  more than 100 re groups in Python 3.5+ (thanks David King)
+- 1.6.6 parse Linux system log dates (thanks Alex Cowan)
+- 1.6.5 handle precision in float format (thanks Levi Kilcher)
+- 1.6.4 handle pipe "|" characters in parse string (thanks Martijn Pieters)
+- 1.6.3 handle repeated instances of named fields, fix bug in PM time
+  overflow
+- 1.6.2 fix logging to use local, not root logger (thanks Necku)
+- 1.6.1 be more flexible regarding matched ISO datetimes and timezones in
+  general, fix bug in timezones without ":" and improve docs
+- 1.6.0 add support for optional ``pattern`` attribute in user-defined types
+  (thanks Jens Engel)
+- 1.5.3 fix handling of question marks
+- 1.5.2 fix type conversion error with dotted names (thanks Sebastian Thiel)
+- 1.5.1 implement handling of named datetime fields
+- 1.5 add handling of dotted field names (thanks Sebastian Thiel)
+- 1.4.1 fix parsing of "0" in int conversion (thanks James Rowe)
+- 1.4 add __getitem__ convenience access on Result.
+- 1.3.3 fix Python 2.5 setup.py issue.
+- 1.3.2 fix Python 3.2 setup.py issue.
+- 1.3.1 fix a couple of Python 3.2 compatibility issues.
+- 1.3 added search() and findall(); removed compile() from ``import *``
+  export as it overwrites builtin.
+- 1.2 added ability for custom and override type conversions to be
+  provided; some cleanup
+- 1.1.9 to keep things simpler number sign is handled automatically;
+  significant robustification in the face of edge-case input.
+- 1.1.8 allow "d" fields to have number base "0x" etc. prefixes;
+  fix up some field type interactions after stress-testing the parser;
+  implement "%" type.
+- 1.1.7 Python 3 compatibility tweaks (2.5 to 2.7 and 3.2 are supported).
+- 1.1.6 add "e" and "g" field types; removed redundant "h" and "X";
+  removed need for explicit "#".
+- 1.1.5 accept textual dates in more places; Result now holds match span
+  positions.
+- 1.1.4 fixes to some int type conversion; implemented "=" alignment; added
+  date/time parsing with a variety of formats handled.
+- 1.1.3 type conversion is automatic based on specified field types. Also added
+  "f" and "n" types.
+- 1.1.2 refactored, added compile() and limited ``from parse import *``
+- 1.1.1 documentation improvements
+- 1.1.0 implemented more of the `Format Specification Mini-Language`_
+  and removed the restriction on mixing fixed-position and named fields
+- 1.0.0 initial release
+
+This code is copyright 2012-2019 Richard Jones <richard@python.org>
+See the end of the source file for the license of use.
+'''
+
+from __future__ import absolute_import
+__version__ = '1.12.1'
+
+# yes, I now have two problems
+import re
+import sys
+from datetime import datetime, time, tzinfo, timedelta
+from decimal import Decimal
+from functools import partial
+import logging
+
+__all__ = 'parse search findall with_pattern'.split()
+
+log = logging.getLogger(__name__)
+
+
+def with_pattern(pattern, regex_group_count=None):
+    r"""Attach a regular expression pattern matcher to a custom type converter
+    function.
+
+    This annotates the type converter with the :attr:`pattern` attribute.
+
+    EXAMPLE:
+        >>> import parse
+        >>> @parse.with_pattern(r"\d+")
+        ... def parse_number(text):
+        ...     return int(text)
+
+    is equivalent to:
+
+        >>> def parse_number(text):
+        ...     return int(text)
+        >>> parse_number.pattern = r"\d+"
+
+    :param pattern: regular expression pattern (as text)
+    :param regex_group_count: Indicates how many regex-groups are in pattern.
+    :return: wrapped function
+    """
+    def decorator(func):
+        func.pattern = pattern
+        func.regex_group_count = regex_group_count
+        return func
+    return decorator
+
+
+def int_convert(base):
+    '''Convert a string to an integer.
+
+    The string may start with a sign.
+
+    It may be of a base other than 10.
+
+    It may start with a base indicator, 0#nnnn, which we assume should
+    override the specified base.
+
+    It may also have other non-numeric characters that we can ignore.
+    '''
+    CHARS = '0123456789abcdefghijklmnopqrstuvwxyz'
+
+    def f(string, match, base=base):
+        if string[0] == '-':
+            sign = -1
+        else:
+            sign = 1
+
+        if string[0] == '0' and len(string) > 2:
+            if string[1] in 'bB':
+                base = 2
+            elif string[1] in 'oO':
+                base = 8
+            elif string[1] in 'xX':
+                base = 16
+            else:
+                # just go with the base specified
+                pass
+
+        chars = CHARS[:base]
+        string = re.sub('[^%s]' % chars, '', string.lower())
+        return sign * int(string, base)
+    return f
+
+
+def percentage(string, match):
+    return float(string[:-1]) / 100.
+
+
+class FixedTzOffset(tzinfo):
+    """Fixed offset in minutes east from UTC.
+    """
+    ZERO = timedelta(0)
+
+    def __init__(self, offset, name):
+        self._offset = timedelta(minutes=offset)
+        self._name = name
+
+    def __repr__(self):
+        return '<%s %s %s>' % (self.__class__.__name__, self._name,
+            self._offset)
+
+    def utcoffset(self, dt):
+        return self._offset
+
+    def tzname(self, dt):
+        return self._name
+
+    def dst(self, dt):
+        return self.ZERO
+
+    def __eq__(self, other):
+        return self._name == other._name and self._offset == other._offset
+
+
+MONTHS_MAP = dict(
+    Jan=1, January=1,
+    Feb=2, February=2,
+    Mar=3, March=3,
+    Apr=4, April=4,
+    May=5,
+    Jun=6, June=6,
+    Jul=7, July=7,
+    Aug=8, August=8,
+    Sep=9, September=9,
+    Oct=10, October=10,
+    Nov=11, November=11,
+    Dec=12, December=12
+)
+DAYS_PAT = r'(Mon|Tue|Wed|Thu|Fri|Sat|Sun)'
+MONTHS_PAT = r'(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)'
+ALL_MONTHS_PAT = r'(%s)' % '|'.join(MONTHS_MAP)
+TIME_PAT = r'(\d{1,2}:\d{1,2}(:\d{1,2}(\.\d+)?)?)'
+AM_PAT = r'(\s+[AP]M)'
+TZ_PAT = r'(\s+[-+]\d\d?:?\d\d)'
+
+
+def date_convert(string, match, ymd=None, mdy=None, dmy=None,
+        d_m_y=None, hms=None, am=None, tz=None, mm=None, dd=None):
+    '''Convert the incoming string containing some date / time info into a
+    datetime instance.
+    '''
+    groups = match.groups()
+    time_only = False
+    if mm and dd:
+        y=datetime.today().year
+        m=groups[mm]
+        d=groups[dd]
+    elif ymd is not None:
+        y, m, d = re.split(r'[-/\s]', groups[ymd])
+    elif mdy is not None:
+        m, d, y = re.split(r'[-/\s]', groups[mdy])
+    elif dmy is not None:
+        d, m, y = re.split(r'[-/\s]', groups[dmy])
+    elif d_m_y is not None:
+        d, m, y = d_m_y
+        d = groups[d]
+        m = groups[m]
+        y = groups[y]
+    else:
+        time_only = True
+
+    H = M = S = u = 0
+    if hms is not None and groups[hms]:
+        t = groups[hms].split(':')
+        if len(t) == 2:
+            H, M = t
+        else:
+            H, M, S = t
+            if '.' in S:
+                S, u = S.split('.')
+                u = int(float('.' + u) * 1000000)
+            S = int(S)
+        H = int(H)
+        M = int(M)
+
+    if am is not None:
+        am = groups[am]
+        if am:
+            am = am.strip()
+        if am == 'AM' and H == 12:
+            # correction for "12" hour functioning as "0" hour: 12:15 AM = 00:15 by 24 hr clock
+            H -= 12
+        elif am == 'PM' and H == 12:
+            # no correction needed: 12PM is midday, 12:00 by 24 hour clock
+            pass
+        elif am == 'PM':
+            H += 12
+
+    if tz is not None:
+        tz = groups[tz]
+    if tz == 'Z':
+        tz = FixedTzOffset(0, 'UTC')
+    elif tz:
+        tz = tz.strip()
+        if tz.isupper():
+            # TODO use the awesome python TZ module?
+            pass
+        else:
+            sign = tz[0]
+            if ':' in tz:
+                tzh, tzm = tz[1:].split(':')
+            elif len(tz) == 4:  # 'snnn'
+                tzh, tzm = tz[1], tz[2:4]
+            else:
+                tzh, tzm = tz[1:3], tz[3:5]
+            offset = int(tzm) + int(tzh) * 60
+            if sign == '-':
+                offset = -offset
+            tz = FixedTzOffset(offset, tz)
+
+    if time_only:
+        d = time(H, M, S, u, tzinfo=tz)
+    else:
+        y = int(y)
+        if m.isdigit():
+            m = int(m)
+        else:
+            m = MONTHS_MAP[m]
+        d = int(d)
+        d = datetime(y, m, d, H, M, S, u, tzinfo=tz)
+
+    return d
+
+
+class TooManyFields(ValueError):
+    pass
+
+
+class RepeatedNameError(ValueError):
+    pass
+
+
+# note: {} are handled separately
+# note: escapes regex metacharacters so literal format text matches itself
+REGEX_SAFETY = re.compile(r'([?\\\\.[\]()*+\^$!\|])')
+
+# allowed field types
+ALLOWED_TYPES = set(list('nbox%fFegwWdDsSl') +
+    ['t' + c for c in 'ieahgcts'])
+
+
+def extract_format(format, extra_types):
+    '''Pull apart the format [[fill]align][0][width][.precision][type]
+    '''
+    fill = align = None
+    if format[0] in '<>=^':
+        align = format[0]
+        format = format[1:]
+    elif len(format) > 1 and format[1] in '<>=^':
+        fill = format[0]
+        align = format[1]
+        format = format[2:]
+
+    zero = False
+    if format and format[0] == '0':
+        zero = True
+        format = format[1:]
+
+    width = ''
+    while format:
+        if not format[0].isdigit():
+            break
+        width += format[0]
+        format = format[1:]
+
+    if format.startswith('.'):
+        # Precision isn't needed but we need to capture it so that
+        # the ValueError isn't raised.
+        format = format[1:]  # drop the '.'
+        precision = ''
+        while format:
+            if not format[0].isdigit():
+                break
+            precision += format[0]
+            format = format[1:]
+
+    # the rest is the type, if present
+    type = format
+    if type and type not in ALLOWED_TYPES and type not in extra_types:
+        raise ValueError('format spec %r not recognised' % type)
+
+    return locals()
+
+
+PARSE_RE = re.compile(r"""({{|}}|{\w*(?:(?:\.\w+)|(?:\[[^\]]+\]))*(?::[^}]+)?})""")
+
+
+class Parser(object):
+    '''Encapsulate a format string that may be used to parse other strings.
+    '''
+    def __init__(self, format, extra_types=None, case_sensitive=False):
+        # a mapping of a name as in {hello.world} to a regex-group compatible
+        # name, like hello__world. It's used to prevent the name-to-group
+        # and group-to-name transformations from failing subtly, such as in:
+        # "hello_.world" -> "hello___world" -> "hello._world"
+        self._group_to_name_map = {}
+        # also store the original field name to group name mapping to allow
+        # multiple instances of a name in the format string
+        self._name_to_group_map = {}
+        # and to sanity check the repeated instances store away the first
+        # field type specification for the named field
+        self._name_types = {}
+
+        self._format = format
+        if extra_types is None:
+            extra_types = {}
+        self._extra_types = extra_types
+        if case_sensitive:
+            self._re_flags = re.DOTALL
+        else:
+            self._re_flags = re.IGNORECASE | re.DOTALL
+        self._fixed_fields = []
+        self._named_fields = []
+        self._group_index = 0
+        self._type_conversions = {}
+        self._expression = self._generate_expression()
+        self.__search_re = None
+        self.__match_re = None
+
+        log.debug('format %r -> %r', format, self._expression)
+
+    def __repr__(self):
+        if len(self._format) > 20:
+            return '<%s %r>' % (self.__class__.__name__,
+                self._format[:17] + '...')
+        return '<%s %r>' % (self.__class__.__name__, self._format)
+
+    @property
+    def _search_re(self):
+        if self.__search_re is None:
+            try:
+                self.__search_re = re.compile(self._expression, self._re_flags)
+            except AssertionError:
+                # access error through sys to keep py3k and backward compat
+                e = str(sys.exc_info()[1])
+                if e.endswith('this version only supports 100 named groups'):
+                    raise TooManyFields('sorry, you are attempting to parse '
+                        'too many complex fields')
+        return self.__search_re
+
+    @property
+    def _match_re(self):
+        if self.__match_re is None:
+            expression = r'^%s$' % self._expression
+            try:
+                self.__match_re = re.compile(expression, self._re_flags)
+            except AssertionError:
+                # access error through sys to keep py3k and backward compat
+                e = str(sys.exc_info()[1])
+                if e.endswith('this version only supports 100 named groups'):
+                    raise TooManyFields('sorry, you are attempting to parse '
+                        'too many complex fields')
+            except re.error:
+                raise NotImplementedError("Group names (e.g. (?P<name>) can "
+                    "cause failure, as they are not escaped properly: '%s'" %
+                    expression)
+        return self.__match_re
+
+    def parse(self, string, evaluate_result=True):
+        '''Match my format to the string exactly.
+
+        Return a Result or Match instance or None if there's no match.
+        '''
+        m = self._match_re.match(string)
+        if m is None:
+            return None
+
+        if evaluate_result:
+            return self.evaluate_result(m)
+        else:
+            return Match(self, m)
+
+    def search(self, string, pos=0, endpos=None, evaluate_result=True):
+        '''Search the string for my format.
+
+        Optionally start the search at "pos" character index and limit the
+        search to a maximum index of endpos - equivalent to
+        search(string[:endpos]).
+
+        If the ``evaluate_result`` argument is set to ``False`` a
+        Match instance is returned instead of the actual Result instance.
+
+        Return either a Result instance or None if there's no match.
+        '''
+        if endpos is None:
+            endpos = len(string)
+        m = self._search_re.search(string, pos, endpos)
+        if m is None:
+            return None
+
+        if evaluate_result:
+            return self.evaluate_result(m)
+        else:
+            return Match(self, m)
+
+    def findall(self, string, pos=0, endpos=None, extra_types=None, evaluate_result=True):
+        '''Search "string" for all occurrences of "format".
+
+        Optionally start the search at "pos" character index and limit the
+        search to a maximum index of endpos - equivalent to
+        search(string[:endpos]).
+
+        Returns an iterator that holds Result or Match instances for each format match
+        found.
+        '''
+        if endpos is None:
+            endpos = len(string)
+        return ResultIterator(self, string, pos, endpos, evaluate_result=evaluate_result)
+
+    def _expand_named_fields(self, named_fields):
+        result = {}
+        for field, value in named_fields.items():
+            # split 'aaa[bbb][ccc]...' into 'aaa' and '[bbb][ccc]...'
+            basename, subkeys = re.match(r'([^\[]+)(.*)', field).groups()
+
+            # create nested dictionaries {'aaa': {'bbb': {'ccc': ...}}}
+            d = result
+            k = basename
+
+            if subkeys:
+                for subkey in re.findall(r'\[[^\]]+\]', subkeys):
+                    d = d.setdefault(k,{})
+                    k = subkey[1:-1]
+
+            # assign the value to the last key
+            d[k] = value
+
+        return result
+
+    def evaluate_result(self, m):
+        '''Generate a Result instance for the given regex match object'''
+        # ok, figure the fixed fields we've pulled out and type convert them
+        fixed_fields = list(m.groups())
+        for n in self._fixed_fields:
+            if n in self._type_conversions:
+                fixed_fields[n] = self._type_conversions[n](fixed_fields[n], m)
+        fixed_fields = tuple(fixed_fields[n] for n in self._fixed_fields)
+
+        # grab the named fields, converting where requested
+        groupdict = m.groupdict()
+        named_fields = {}
+        name_map = {}
+        for k in self._named_fields:
+            korig = self._group_to_name_map[k]
+            name_map[korig] = k
+            if k in self._type_conversions:
+                value = self._type_conversions[k](groupdict[k], m)
+            else:
+                value = groupdict[k]
+
+            named_fields[korig] = value
+
+        # now figure the match spans
+        spans = dict((n, m.span(name_map[n])) for n in named_fields)
+        spans.update((i, m.span(n + 1))
+            for i, n in enumerate(self._fixed_fields))
+
+        # and that's our result
+        return Result(fixed_fields, self._expand_named_fields(named_fields), spans)
+
+    def _regex_replace(self, match):
+        return '\\' + match.group(1)
+
+    def _generate_expression(self):
+        # turn my _format attribute into the _expression attribute
+        e = []
+        for part in PARSE_RE.split(self._format):
+            if not part:
+                continue
+            elif part == '{{':
+                e.append(r'\{')
+            elif part == '}}':
+                e.append(r'\}')
+            elif part[0] == '{' and part[-1] == '}':
+                # this will be a braces-delimited field to handle
+                e.append(self._handle_field(part))
+            else:
+                # just some text to match
+                e.append(REGEX_SAFETY.sub(self._regex_replace, part))
+        return ''.join(e)
+
+    def _to_group_name(self, field):
+        # return a version of field which can be used as capture group, even
+        # though it might contain '.'
+        group = field.replace('.', '_').replace('[', '_').replace(']', '_')
+
+        # make sure we don't collide ("a.b" colliding with "a_b")
+        n = 1
+        while group in self._group_to_name_map:
+            n += 1
+            if '.' in field:
+                group = field.replace('.', '_' * n)
+            elif '_' in field:
+                group = field.replace('_', '_' * n)
+            else:
+                raise KeyError('duplicated group name %r' % (field,))
+
+        # save off the mapping
+        self._group_to_name_map[group] = field
+        self._name_to_group_map[field] = group
+        return group
+
+    def _handle_field(self, field):
+        # first: lose the braces
+        field = field[1:-1]
+
+        # now figure whether this is an anonymous or named field, and whether
+        # there's any format specification
+        format = ''
+        if field and field[0].isalpha():
+            if ':' in field:
+                name, format = field.split(':')
+            else:
+                name = field
+            if name in self._name_to_group_map:
+                if self._name_types[name] != format:
+                    raise RepeatedNameError('field type %r for field "%s" '
+                        'does not match previous seen type %r' % (format,
+                        name, self._name_types[name]))
+                group = self._name_to_group_map[name]
+                # match previously-seen value
+                return r'(?P=%s)' % group
+            else:
+                group = self._to_group_name(name)
+                self._name_types[name] = format
+            self._named_fields.append(group)
+            # this will become a group, which must not contain dots
+            wrap = r'(?P<%s>%%s)' % group
+        else:
+            self._fixed_fields.append(self._group_index)
+            wrap = r'(%s)'
+            if ':' in field:
+                format = field[1:]
+            group = self._group_index
+
+        # simplest case: no type specifier ({} or {name})
+        if not format:
+            self._group_index += 1
+            return wrap % r'.+?'
+
+        # decode the format specification
+        format = extract_format(format, self._extra_types)
+
+        # figure type conversions, if any
+        type = format['type']
+        is_numeric = type and type in 'n%fegdobh'
+        if type in self._extra_types:
+            type_converter = self._extra_types[type]
+            s = getattr(type_converter, 'pattern', r'.+?')
+            regex_group_count = getattr(type_converter, 'regex_group_count', 0)
+            if regex_group_count is None:
+                regex_group_count = 0
+            self._group_index += regex_group_count
+
+            def f(string, m):
+                return type_converter(string)
+            self._type_conversions[group] = f
+        elif type == 'n':
+            s = r'\d{1,3}([,.]\d{3})*'
+            self._group_index += 1
+            self._type_conversions[group] = int_convert(10)
+        elif type == 'b':
+            s = r'(0[bB])?[01]+'
+            self._type_conversions[group] = int_convert(2)
+            self._group_index += 1
+        elif type == 'o':
+            s = r'(0[oO])?[0-7]+'
+            self._type_conversions[group] = int_convert(8)
+            self._group_index += 1
+        elif type == 'x':
+            s = r'(0[xX])?[0-9a-fA-F]+'
+            self._type_conversions[group] = int_convert(16)
+            self._group_index += 1
+        elif type == '%':
+            s = r'\d+(\.\d+)?%'
+            self._group_index += 1
+            self._type_conversions[group] = percentage
+        elif type == 'f':
+            s = r'\d+\.\d+'
+            self._type_conversions[group] = lambda s, m: float(s)
+        elif type == 'F':
+            s = r'\d+\.\d+'
+            self._type_conversions[group] = lambda s, m: Decimal(s)
+        elif type == 'e':
+            s = r'\d+\.\d+[eE][-+]?\d+|nan|NAN|[-+]?inf|[-+]?INF'
+            self._type_conversions[group] = lambda s, m: float(s)
+        elif type == 'g':
+            s = r'\d+(\.\d+)?([eE][-+]?\d+)?|nan|NAN|[-+]?inf|[-+]?INF'
+            self._group_index += 2
+            self._type_conversions[group] = lambda s, m: float(s)
+        elif type == 'd':
+            if format.get('width'):
+                width = r'{1,%s}' % int(format['width'])
+            else:
+                width = '+'
+            s = r'\d{w}|0[xX][0-9a-fA-F]{w}|0[bB][01]{w}|0[oO][0-7]{w}'.format(w=width)
+            self._type_conversions[group] = int_convert(10)
+        elif type == 'ti':
+            s = r'(\d{4}-\d\d-\d\d)((\s+|T)%s)?(Z|\s*[-+]\d\d:?\d\d)?' % \
+                TIME_PAT
+            n = self._group_index
+            self._type_conversions[group] = partial(date_convert, ymd=n + 1,
+                hms=n + 4, tz=n + 7)
+            self._group_index += 7
+        elif type == 'tg':
+            s = r'(\d{1,2}[-/](\d{1,2}|%s)[-/]\d{4})(\s+%s)?%s?%s?' % (
+                ALL_MONTHS_PAT, TIME_PAT, AM_PAT, TZ_PAT)
+            n = self._group_index
+            self._type_conversions[group] = partial(date_convert, dmy=n + 1,
+                hms=n + 5, am=n + 8, tz=n + 9)
+            self._group_index += 9
+        elif type == 'ta':
+            s = r'((\d{1,2}|%s)[-/]\d{1,2}[-/]\d{4})(\s+%s)?%s?%s?' % (
+                ALL_MONTHS_PAT, TIME_PAT, AM_PAT, TZ_PAT)
+            n = self._group_index
+            self._type_conversions[group] = partial(date_convert, mdy=n + 1,
+                hms=n + 5, am=n + 8, tz=n + 9)
+            self._group_index += 9
+        elif type == 'te':
+            # this will allow microseconds through if they're present, but meh
+            s = r'(%s,\s+)?(\d{1,2}\s+%s\s+\d{4})\s+%s%s' % (DAYS_PAT,
+                MONTHS_PAT, TIME_PAT, TZ_PAT)
+            n = self._group_index
+            self._type_conversions[group] = partial(date_convert, dmy=n + 3,
+                hms=n + 5, tz=n + 8)
+            self._group_index += 8
+        elif type == 'th':
+            # slight flexibility here from the stock Apache format
+            s = r'(\d{1,2}[-/]%s[-/]\d{4}):%s%s' % (MONTHS_PAT, TIME_PAT,
+                TZ_PAT)
+            n = self._group_index
+            self._type_conversions[group] = partial(date_convert, dmy=n + 1,
+                hms=n + 3, tz=n + 6)
+            self._group_index += 6
+        elif type == 'tc':
+            s = r'(%s)\s+%s\s+(\d{1,2})\s+%s\s+(\d{4})' % (
+                DAYS_PAT, MONTHS_PAT, TIME_PAT)
+            n = self._group_index
+            self._type_conversions[group] = partial(date_convert,
+                d_m_y=(n + 4, n + 3, n + 8), hms=n + 5)
+            self._group_index += 8
+        elif type == 'tt':
+            s = r'%s?%s?%s?' % (TIME_PAT, AM_PAT, TZ_PAT)
+            n = self._group_index
+            self._type_conversions[group] = partial(date_convert, hms=n + 1,
+                am=n + 4, tz=n + 5)
+            self._group_index += 5
+        elif type == 'ts':
+            s = r'%s(\s+)(\d+)(\s+)(\d{1,2}:\d{1,2}:\d{1,2})?' % MONTHS_PAT
+            n = self._group_index
+            self._type_conversions[group] = partial(date_convert, mm=n+1, dd=n+3,
+                hms=n + 5)
+            self._group_index += 5
+        elif type == 'l':
+            s = r'[A-Za-z]+'
+        elif type:
+            s = r'\%s+' % type
+        elif format.get('precision'):
+            if format.get('width'):
+                s = r'.{%s,%s}?' % (format['width'], format['precision'])
+            else:
+                s = r'.{1,%s}?' % format['precision']
+        elif format.get('width'):
+            s = r'.{%s,}?' % format['width']
+        else:
+            s = r'.+?'
+
+        align = format['align']
+        fill = format['fill']
+
+        # handle some numeric-specific things like fill and sign
+        if is_numeric:
+            # prefix with something (align "=" trumps zero)
+            if align == '=':
+                # special case - align "=" acts like the zero above but with
+                # configurable fill defaulting to "0"
+                if not fill:
+                    fill = '0'
+                s = r'%s*' % fill + s
+
+            # allow numbers to be prefixed with a sign
+            s = r'[-+ ]?' + s
+
+        if not fill:
+            fill = ' '
+
+        # Place into a group now - this captures the value we want to keep.
+        # Everything else from now is just padding to be stripped off
+        if wrap:
+            s = wrap % s
+            self._group_index += 1
+
+        if format['width']:
+            # all we really care about is that if the format originally
+            # specified a width then there will probably be padding - without
+            # an explicit alignment that'll mean right alignment with spaces
+            # padding
+            if not align:
+                align = '>'
+
+        if fill in r'.\+?*[](){}^$':
+            fill = '\\' + fill
+
+        # align "=" has been handled
+        if align == '<':
+            s = '%s%s*' % (s, fill)
+        elif align == '>':
+            s = '%s*%s' % (fill, s)
+        elif align == '^':
+            s = '%s*%s%s*' % (fill, s, fill)
+
+        return s
+
+
+class Result(object):
+    '''The result of a parse() or search().
+
+    Fixed results may be looked up using `result[index]`.
+
+    Named results may be looked up using `result['name']`.
+
+    Named results may be tested for existence using `'name' in result`.
+    '''
+    def __init__(self, fixed, named, spans):
+        self.fixed = fixed
+        self.named = named
+        self.spans = spans
+
+    def __getitem__(self, item):
+        if isinstance(item, int):
+            return self.fixed[item]
+        return self.named[item]
+
+    def __repr__(self):
+        return '<%s %r %r>' % (self.__class__.__name__, self.fixed,
+            self.named)
+
+    def __contains__(self, name):
+        return name in self.named
+
+
+class Match(object):
+    '''The result of a parse() or search() call when ``evaluate_result=False``.
+
+    This class is only used to expose internal used regex match objects
+    to the user and use them for external Parser.evaluate_result calls.
+    '''
+    def __init__(self, parser, match):
+        self.parser = parser
+        self.match = match
+
+    def evaluate_result(self):
+        '''Generate results for this Match'''
+        return self.parser.evaluate_result(self.match)
+
+
+class ResultIterator(object):
+    '''The result of a findall() operation.
+
+    Each element is a Result instance.
+    '''
+    def __init__(self, parser, string, pos, endpos, evaluate_result=True):
+        self.parser = parser
+        self.string = string
+        self.pos = pos
+        self.endpos = endpos
+        self.evaluate_result = evaluate_result
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        m = self.parser._search_re.search(self.string, self.pos, self.endpos)
+        if m is None:
+            raise StopIteration()
+        self.pos = m.end()
+
+        if self.evaluate_result:
+            return self.parser.evaluate_result(m)
+        else:
+            return Match(self.parser, m)
+
+    # pre-py3k compat
+    next = __next__
+
+
+def parse(format, string, extra_types=None, evaluate_result=True, case_sensitive=False):
+    '''Using "format" attempt to pull values from "string".
+
+    The format must match the string contents exactly. If the value
+    you're looking for is instead just a part of the string use
+    search().
+
+    If ``evaluate_result`` is True the return value will be an Result instance with two attributes:
+
+     .fixed - tuple of fixed-position values from the string
+     .named - dict of named values from the string
+
+    If ``evaluate_result`` is False the return value will be a Match instance with one method:
+
+     .evaluate_result() - This will return a Result instance like you would get
+                          with ``evaluate_result`` set to True
+
+    The default behaviour is to match strings case insensitively. You may match with
+    case by specifying case_sensitive=True.
+
+    If the format is invalid a ValueError will be raised.
+
+    See the module documentation for the use of "extra_types".
+
+    In the case there is no match parse() will return None.
+    '''
+    p = Parser(format, extra_types=extra_types, case_sensitive=case_sensitive)
+    return p.parse(string, evaluate_result=evaluate_result)
+
+
+def search(format, string, pos=0, endpos=None, extra_types=None, evaluate_result=True,
+        case_sensitive=False):
+    '''Search "string" for the first occurrence of "format".
+
+    The format may occur anywhere within the string. If
+    instead you wish for the format to exactly match the string
+    use parse().
+
+    Optionally start the search at "pos" character index and limit the search
+    to a maximum index of endpos - equivalent to search(string[:endpos]).
+
+    If ``evaluate_result`` is True the return value will be an Result instance with two attributes:
+
+     .fixed - tuple of fixed-position values from the string
+     .named - dict of named values from the string
+
+    If ``evaluate_result`` is False the return value will be a Match instance with one method:
+
+     .evaluate_result() - This will return a Result instance like you would get
+                          with ``evaluate_result`` set to True
+
+    The default behaviour is to match strings case insensitively. You may match with
+    case by specifying case_sensitive=True.
+
+    If the format is invalid a ValueError will be raised.
+
+    See the module documentation for the use of "extra_types".
+
+    In the case there is no match search() will return None.
+    '''
+    p = Parser(format, extra_types=extra_types, case_sensitive=case_sensitive)
+    return p.search(string, pos, endpos, evaluate_result=evaluate_result)
+
+
+def findall(format, string, pos=0, endpos=None, extra_types=None, evaluate_result=True,
+        case_sensitive=False):
+    '''Search "string" for all occurrences of "format".
+
+    You will be returned an iterator that holds Result instances
+    for each format match found.
+
+    Optionally start the search at "pos" character index and limit the search
+    to a maximum index of endpos - equivalent to search(string[:endpos]).
+
+    If ``evaluate_result`` is True each returned Result instance has two attributes:
+
+     .fixed - tuple of fixed-position values from the string
+     .named - dict of named values from the string
+
+    If ``evaluate_result`` is False each returned value is a Match instance with one method:
+
+     .evaluate_result() - This will return a Result instance like you would get
+                          with ``evaluate_result`` set to True
+
+    The default behaviour is to match strings case insensitively. You may match with
+    case by specifying case_sensitive=True.
+
+    If the format is invalid a ValueError will be raised.
+
+    See the module documentation for the use of "extra_types".
+    '''
+    p = Parser(format, extra_types=extra_types, case_sensitive=case_sensitive)
+    return Parser(format, extra_types=extra_types).findall(string, pos, endpos, evaluate_result=evaluate_result)
+
+
+def compile(format, extra_types=None, case_sensitive=False):
+    '''Create a Parser instance to parse "format".
+
+    The resultant Parser has a method .parse(string) which
+    behaves in the same manner as parse(format, string).
+
+    The default behaviour is to match strings case insensitively. You may match with
+    case by specifying case_sensitive=True.
+
+    Use this function if you intend to parse many strings
+    with the same format.
+
+    See the module documentation for the use of "extra_types".
+
+    Returns a Parser instance.
+    '''
+    return Parser(format, extra_types=extra_types, case_sensitive=case_sensitive)
+
+
+# Copyright (c) 2012-2019 Richard Jones <richard@python.org>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+#  The above copyright notice and this permission notice shall be included in
+#  all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# vim: set filetype=python ts=4 sw=4 et si tw=75
diff --git a/parse_type/parse_util.py b/parse_type/parse_util.py
new file mode 100644
index 0000000..0e5ee73
--- /dev/null
+++ b/parse_type/parse_util.py
@@ -0,0 +1,198 @@
+# -*- coding: utf-8 -*-
+# pylint: disable=missing-docstring
+"""
+Provides generic utility classes for the :class:`parse.Parser` class.
+"""
+
+from __future__ import absolute_import
+from collections import namedtuple
+import parse
+import six
+
+
+# -- HELPER-CLASS: For format part in a Field.
+# REQUIRES: Python 2.6 or newer.
+# pylint: disable=redefined-builtin, too-many-arguments
+FormatSpec = namedtuple("FormatSpec",
+                        ["type", "width", "zero", "align", "fill", "precision"])
+
+def make_format_spec(type=None, width="", zero=False, align=None, fill=None,
+                     precision=None):
+    # -- CONVENIENCE FACTORY: Provides defaults for all FormatSpec fields.
+    return FormatSpec(type, width, zero, align, fill, precision)
+# pylint: enable=redefined-builtin
+
+class Field(object):
+    """
+    Provides a ValueObject for a Field in a parse expression.
+
+    Examples:
+        * "{}"
+        * "{name}"
+        * "{:format}"
+        * "{name:format}"
+
+    Format specification: [[fill]align][0][width][.precision][type]
+    """
+    # pylint: disable=redefined-builtin
+    ALIGN_CHARS = '<>=^'
+
+    def __init__(self, name="", format=None):
+        # :param name:   Field name (may be empty for anonymous fields).
+        # :param format: Format/type part after the colon (or None).
+        self.name = name
+        self.format = format
+        self._format_spec = None    # -- LAZY: Computed on first access.
+
+    def set_format(self, format):
+        # -- NOTE: Also resets the cached format_spec.
+        self.format = format
+        self._format_spec = None
+
+    @property
+    def has_format(self):
+        return bool(self.format)
+
+    @property
+    def format_spec(self):
+        # -- LAZY-INIT: Parse the format string only once, then cache it.
+        if not self._format_spec and self.format:
+            self._format_spec = self.extract_format_spec(self.format)
+        return self._format_spec
+
+    def __str__(self):
+        # -- RECONSTRUCT: Textual field representation, like "{name:format}".
+        name = self.name or ""
+        if self.has_format:
+            return "{%s:%s}" % (name, self.format)
+        return "{%s}" % name
+
+    def __eq__(self, other):
+        # -- SUPPORTS: Comparison with Field objects and with strings.
+        # NOTE: Raises ValueError (not NotImplemented) for any other type.
+        if isinstance(other, Field):
+            format1 = self.format or ""
+            format2 = other.format or ""
+            return (self.name == other.name) and (format1 == format2)
+        elif isinstance(other, six.string_types):
+            return str(self) == other
+        else:
+            raise ValueError(other)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    @staticmethod
+    def make_format(format_spec):
+        """Build format string from a format specification.
+
+        :param format_spec:     Format specification (as FormatSpec object).
+        :return: Composed format (as string).
+        """
+        fill = ''
+        align = ''
+        zero = ''
+        width = format_spec.width
+        if format_spec.align:
+            align = format_spec.align[0]
+            # -- NOTE: fill is only emitted together with an align char.
+            if format_spec.fill:
+                fill = format_spec.fill[0]
+        if format_spec.zero:
+            zero = '0'
+
+        precision_part = ""
+        if format_spec.precision:
+            precision_part = ".%s" % format_spec.precision
+
+        # -- FORMAT-SPEC: [[fill]align][0][width][.precision][type]
+        return "%s%s%s%s%s%s" % (fill, align, zero, width,
+                                 precision_part, format_spec.type)
+
+
+    @classmethod
+    def extract_format_spec(cls, format):
+        """Pull apart the format: [[fill]align][0][width][.precision][type]"""
+        # -- BASED-ON: parse.extract_format()
+        # pylint: disable=redefined-builtin, unsubscriptable-object
+        if not format:
+            raise ValueError("INVALID-FORMAT: %s (empty-string)" % format)
+
+        orig_format = format
+        fill = align = None
+        if format[0] in cls.ALIGN_CHARS:
+            align = format[0]
+            format = format[1:]
+        elif len(format) > 1 and format[1] in cls.ALIGN_CHARS:
+            # -- CASE: A fill char precedes the align char.
+            fill = format[0]
+            align = format[1]
+            format = format[2:]
+
+        zero = False
+        if format and format[0] == '0':
+            zero = True
+            format = format[1:]
+
+        # -- CONSUME: Leading digits as the width part.
+        width = ''
+        while format:
+            if not format[0].isdigit():
+                break
+            width += format[0]
+            format = format[1:]
+
+        precision = None
+        if format.startswith('.'):
+            # Precision isn't needed but we need to capture it so that
+            # the ValueError isn't raised.
+            format = format[1:]  # drop the '.'
+            precision = ''
+            while format:
+                if not format[0].isdigit():
+                    break
+                precision += format[0]
+                format = format[1:]
+
+        # the rest is the type, if present
+        type = format
+        if not type:
+            raise ValueError("INVALID-FORMAT: %s (without type)" % orig_format)
+        return FormatSpec(type, width, zero, align, fill, precision)
+
+
+class FieldParser(object):
+    """
+    Utility class that parses/extracts fields in parse expressions.
+    """
+
+    @classmethod
+    def parse(cls, text):
+        """Parse one braces-delimited field, like "{name}" or "{name:format}".
+
+        :param text: Field text, including the enclosing braces (as string).
+        :return: Field object.
+        :raises ValueError: If text is not enclosed in braces.
+        """
+        if not (text.startswith('{') and text.endswith('}')):
+            message = "FIELD-SCHEMA MISMATCH: text='%s' (missing braces)" % text
+            raise ValueError(message)
+
+        # first: lose the braces
+        text = text[1:-1]
+        if ':' in text:
+            # -- CASE: Typed field with format.
+            # NOTE(review): split(':') raises ValueError ("too many values to
+            #   unpack") if the format part itself contains another colon.
+            name, format_ = text.split(':')
+        else:
+            name = text
+            format_ = None
+        return Field(name, format_)
+
+    @classmethod
+    def extract_fields(cls, schema):
+        """Extract fields in a parse expression schema.
+
+        :param schema: Parse expression schema/format to use (as string).
+        :return: Generator for fields in schema (as Field objects).
+        """
+        # -- BASED-ON: parse.Parser._generate_expression()
+        for part in parse.PARSE_RE.split(schema):
+            # -- SKIP: Literal text parts and escaped braces ("{{", "}}").
+            if not part or part == '{{' or part == '}}':
+                continue
+            elif part[0] == '{':
+                # this will be a braces-delimited field to handle
+                yield cls.parse(part)
+
+    @classmethod
+    def extract_types(cls, schema):
+        """Extract types (names) for typed fields (with format/type part).
+
+        :param schema: Parser schema/format to use.
+        :return: Generator for type names (as string).
+        """
+        for field in cls.extract_fields(schema):
+            if field.has_format:
+                yield field.format_spec.type
diff --git a/py.requirements/all.txt b/py.requirements/all.txt
new file mode 100644
index 0000000..d749c60
--- /dev/null
+++ b/py.requirements/all.txt
@@ -0,0 +1,14 @@
+# ============================================================================
+# PYTHON PACKAGE REQUIREMENTS FOR: parse_type -- All requirements
+# ============================================================================
+# DESCRIPTION:
+#    pip install -r <THIS_FILE>
+#
+# SEE ALSO:
+#  * http://www.pip-installer.org/
+# ============================================================================
+
+-r basic.txt
+-r develop.txt
+-r testing.txt
+-r py26_more.txt
diff --git a/py.requirements/basic.txt b/py.requirements/basic.txt
new file mode 100644
index 0000000..f116c8e
--- /dev/null
+++ b/py.requirements/basic.txt
@@ -0,0 +1,13 @@
+# ============================================================================
+# PYTHON PACKAGE REQUIREMENTS: Normal usage/installation (minimal)
+# ============================================================================
+# DESCRIPTION:
+#    pip install -r <THIS_FILE>
+#
+# SEE ALSO:
+#  * http://www.pip-installer.org/
+# ============================================================================
+
+parse >= 1.8.4
+enum34
+six >= 1.11.0
diff --git a/py.requirements/ci.travis.txt b/py.requirements/ci.travis.txt
new file mode 100644
index 0000000..c1550a9
--- /dev/null
+++ b/py.requirements/ci.travis.txt
@@ -0,0 +1,12 @@
+pytest <  5.0; python_version <  '3.0'
+pytest >= 5.0; python_version >= '3.0'
+pytest-html >= 1.19.0
+
+unittest2;   python_version < '2.7'
+ordereddict; python_version < '2.7'
+
+# -- NEEDED: By some tests (as proof of concept)
+# NOTE: path.py-10.1 is required for python2.6
+# HINT: path.py => path (python-install-package was renamed for python3)
+path.py >= 11.5.0; python_version <  '3.5'
+path >= 13.1.0;    python_version >= '3.5'
diff --git a/py.requirements/develop.txt b/py.requirements/develop.txt
new file mode 100644
index 0000000..ae01ca0
--- /dev/null
+++ b/py.requirements/develop.txt
@@ -0,0 +1,34 @@
+# ============================================================================
+# PYTHON PACKAGE REQUIREMENTS FOR: parse_type -- For development only
+# ============================================================================
+
+# -- DEVELOPMENT SUPPORT:
+invoke >= 1.2.0
+six >= 1.11.0
+pathlib;    python_version <= '3.4'
+
+# -- HINT: path.py => path (python-install-package was renamed for python3)
+path.py >= 11.5.0; python_version <  '3.5'
+path >= 13.1.0;    python_version >= '3.5'
+
+# For cleanup of python files: py.cleanup
+pycmd
+
+# -- PROJECT ADMIN SUPPORT:
+# OLD: bumpversion
+bump2version >= 0.5.6
+
+# -- RELEASE MANAGEMENT: Push package to pypi.
+twine >= 1.13.0
+
+# -- PYTHON2/PYTHON3 COMPATIBILITY:
+modernize >= 0.5
+
+pylint
+
+# -- RELATED:
+-r testing.txt
+-r docs.txt
+
+# -- DISABLED:
+# -r optional.txt
diff --git a/py.requirements/docs.txt b/py.requirements/docs.txt
new file mode 100644
index 0000000..c078269
--- /dev/null
+++ b/py.requirements/docs.txt
@@ -0,0 +1,6 @@
+# ============================================================================
+# PYTHON PACKAGE REQUIREMENTS: For documentation generation
+# ============================================================================
+# sphinxcontrib-cheeseshop >= 0.2
+
+Sphinx >= 1.5
diff --git a/py.requirements/optional.txt b/py.requirements/optional.txt
new file mode 100644
index 0000000..9c9ce3d
--- /dev/null
+++ b/py.requirements/optional.txt
@@ -0,0 +1,7 @@
+# ============================================================================
+# PYTHON PACKAGE REQUIREMENTS FOR: parse_type -- Optional for development
+# ============================================================================
+
+# -- GIT MULTI-REPO TOOL: wstool
+# REQUIRES: wstool >= 0.1.18 (which is not in pypi.org, yet)
+https://github.com/vcstools/wstool/archive/0.1.18.zip
diff --git a/py.requirements/py26_more.txt b/py.requirements/py26_more.txt
new file mode 100644
index 0000000..db072fb
--- /dev/null
+++ b/py.requirements/py26_more.txt
@@ -0,0 +1 @@
+ordereddict;  python_version <= '2.6'
diff --git a/py.requirements/testing.txt b/py.requirements/testing.txt
new file mode 100644
index 0000000..e0ce5b8
--- /dev/null
+++ b/py.requirements/testing.txt
@@ -0,0 +1,17 @@
+# ============================================================================
+# PYTHON PACKAGE REQUIREMENTS FOR: parse_type -- For testing only
+# ============================================================================
+
+pytest >= 4.2
+pytest-html >= 1.16
+pytest-cov
+pytest-runner
+# -- PYTHON 2.6 SUPPORT:
+unittest2;    python_version <= '2.6'
+
+tox   >= 2.8
+coverage >= 4.4
+
+# -- NEEDED-FOR: toxcmd.py
+argparse
+
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 0000000..814d764
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,34 @@
+# ============================================================================
+# PYTEST CONFIGURATION FILE: pytest.ini
+# ============================================================================
+# SEE ALSO:
+#  * http://pytest.org/
+#  * http://pytest.org/latest/customize.html
+#  * http://pytest.org/latest/usage.html
+#  * http://pytest.org/latest/example/pythoncollection.html#change-naming-conventions
+# ============================================================================
+# MORE OPTIONS:
+#  addopts =
+#  python_classes=*Test
+#  python_functions=test
+# ============================================================================
+
+[pytest]
+minversion = 4.2
+testpaths = tests
+python_files = test_*.py
+junit_family = xunit2
+addopts = --metadata PACKAGE_UNDER_TEST parse_type
+    --metadata PACKAGE_VERSION 0.5.3
+    --html=build/testing/report.html --self-contained-html
+    --junit-xml=build/testing/report.xml
+# markers =
+#    smoke
+#    slow
+
+# -- PREPARED:
+# filterwarnings =
+#    ignore:.*invalid escape sequence.*:DeprecationWarning
+
+# -- BACKWARD COMPATIBILITY: pytest < 2.8
+norecursedirs = .git .tox build dist .venv* tmp* _*
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..0941bed
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,21 @@
+# -- CONVENIENCE: Use pytest-runner (ptr) as test runner.
+[aliases]
+docs = build_sphinx
+test = pytest
+
+[build_sphinx]
+source-dir = docs/
+build-dir  = build/docs
+builder    = html
+all_files  = true
+
+[easy_install]
+# set the default location to install packages
+# install_dir = eggs
+# find_links = https://github.com/jenisys/parse_type
+
+[upload_docs]
+upload-dir = build/docs/html
+
+[bdist_wheel]
+universal = 1
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..1037e50
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+Setup script for "parse_type" package.
+
+USAGE:
+    python setup.py install
+    # OR:
+    pip install .
+
+SEE ALSO:
+
+* https://pypi.org/pypi/parse_type
+* https://github.com/jenisys/parse_type
+
+RELATED:
+
+* https://setuptools.readthedocs.io/en/latest/history.html
+"""
+
+import sys
+import os.path
+sys.path.insert(0, os.curdir)
+
+# -- USE: setuptools
+from setuptools import setup, find_packages
+
+
+# -----------------------------------------------------------------------------
+# PREPARE SETUP:
+# -----------------------------------------------------------------------------
+HERE = os.path.dirname(__file__)
+# -- NOTE(review): float("%s.%s") truncates two-digit minor versions,
+#    e.g. Python 3.10 becomes 3.1; only safe for coarse checks like ">= 3.0".
+python_version = float('%s.%s' % sys.version_info[:2])
+
+README = os.path.join(HERE, "README.rst")
+# -- SKIP: First 4 README lines (title/badges) for the long description.
+long_description = ''.join(open(README).readlines()[4:])
+extra = dict(
+    tests_require=[
+        "pytest <  5.0; python_version <  '3.0'", # >= 4.2
+        "pytest >= 5.0; python_version >= '3.0'",
+        "pytest-html >= 1.19.0",
+        # -- PYTHON 2.6 SUPPORT:
+        "unittest2; python_version < '2.7'",
+    ],
+)
+
+if python_version >= 3.0:
+    # -- NOTE(review): use_2to3 was removed in newer setuptools releases;
+    #    confirm against the setuptools version actually in use.
+    extra["use_2to3"] = True
+
+# -- NICE-TO-HAVE:
+# # FILE: setup.cfg -- Use pytest-runner (ptr) as test runner.
+# [aliases]
+# test = ptr
+# USE_PYTEST_RUNNER = os.environ.get("PYSETUP_TEST", "pytest") == "pytest"
+USE_PYTEST_RUNNER = os.environ.get("PYSETUP_TEST", "no") == "pytest"
+if USE_PYTEST_RUNNER:
+    extra["tests_require"].append("pytest-runner")
+
+# -----------------------------------------------------------------------------
+# UTILITY:
+# -----------------------------------------------------------------------------
+def find_packages_by_root_package(where):
+    """
+    Better than excluding everything that is not needed,
+    collect only what is needed.
+
+    :param where: Path of the root package directory (as string).
+    :return: List of package names, root package first (as list of strings).
+    """
+    root_package = os.path.basename(where)
+    packages = [ "%s.%s" % (root_package, sub_package)
+                 for sub_package in find_packages(where)]
+    packages.insert(0, root_package)
+    return packages
+
+
+# -----------------------------------------------------------------------------
+# SETUP:
+# -----------------------------------------------------------------------------
+# -- MAIN: Package metadata and requirements for setuptools.
+setup(
+    name = "parse_type",
+    version = "0.5.3",
+    author = "Jens Engel",
+    author_email = "jenisys@noreply.github.com",
+    url = "https://github.com/jenisys/parse_type",
+    download_url= "http://pypi.python.org/pypi/parse_type",
+    description = "Simplifies to build parse types based on the parse module",
+    long_description = long_description,
+    keywords= "parse, parsing",
+    license = "BSD",
+    packages = find_packages_by_root_package("parse_type"),
+    include_package_data = True,
+
+    # -- REQUIREMENTS:
+    # NOTE: Supports Python 2.6/2.7 and Python >= 3.2 (excludes 3.0/3.1).
+    python_requires=">=2.6, !=3.0.*, !=3.1.*",
+    install_requires=[
+        "parse >= 1.9.1",
+        "enum34; python_version < '3.4'",
+        "six >= 1.11",
+        "ordereddict; python_version < '2.7'",
+    ],
+    extras_require={
+        'docs': ["sphinx>=1.2"],
+        'develop': [
+            "coverage >= 4.4",
+            "pytest <  5.0; python_version <  '3.0'", # >= 4.2
+            "pytest >= 5.0; python_version >= '3.0'",
+            "pytest-html >= 1.19.0",
+            "pytest-cov",
+            "tox >= 2.8",
+        ],
+    },
+
+    test_suite = "tests",
+    test_loader = "setuptools.command.test:ScanningLoader",
+    zip_safe = True,
+
+    classifiers = [
+        "Development Status :: 4 - Beta",
+        "Environment :: Console",
+        "Environment :: Web Environment",
+        "Intended Audience :: Developers",
+        "Operating System :: OS Independent",
+        "Programming Language :: Python :: 2.7",
+        "Programming Language :: Python :: 3.2",
+        "Programming Language :: Python :: 3.3",
+        "Programming Language :: Python :: 3.4",
+        "Programming Language :: Python :: 3.5",
+        "Programming Language :: Python :: 3.6",
+        "Programming Language :: Python :: 3.7",
+        "Programming Language :: Python :: 3.8",
+        "Programming Language :: Python :: Implementation :: CPython",
+        "Programming Language :: Python :: Implementation :: PyPy",
+        "Topic :: Software Development :: Code Generators",
+        "Topic :: Software Development :: Libraries :: Python Modules",
+        "License :: OSI Approved :: BSD License",
+    ],
+    platforms = ['any'],
+    # -- EXTRA: tests_require and (optionally) use_2to3, prepared above.
+    **extra
+)
diff --git a/tasks/__init__.py b/tasks/__init__.py
new file mode 100644
index 0000000..9fa55ef
--- /dev/null
+++ b/tasks/__init__.py
@@ -0,0 +1,70 @@
+# -*- coding: UTF-8 -*-
+# pylint: disable=wrong-import-position, wrong-import-order
+"""
+Invoke build script.
+Show all tasks with::
+
+    invoke -l
+
+.. seealso::
+
+    * http://pyinvoke.org
+    * https://github.com/pyinvoke/invoke
+"""
+
+from __future__ import absolute_import
+
+# -----------------------------------------------------------------------------
+# BOOTSTRAP PATH: Use provided vendor bundle if "invoke" is not installed
+# -----------------------------------------------------------------------------
+from . import _setup    # pylint: disable=wrong-import-order
+import os.path
+import sys
+INVOKE_MINVERSION = "1.2.0"
+_setup.setup_path()
+_setup.require_invoke_minversion(INVOKE_MINVERSION)
+
+# -----------------------------------------------------------------------------
+# IMPORTS:
+# -----------------------------------------------------------------------------
+import sys    # noqa: F811 -- NOTE(review): duplicate import (already above).
+from invoke import Collection
+
+# -- TASK-LIBRARY:
+from . import _tasklet_cleanup as cleanup
+from . import test
+from . import release
+# DISABLED: from . import docs
+
+# -----------------------------------------------------------------------------
+# TASKS:
+# -----------------------------------------------------------------------------
+# None
+
+
+# -----------------------------------------------------------------------------
+# TASK CONFIGURATION:
+# -----------------------------------------------------------------------------
+namespace = Collection()
+namespace.add_collection(Collection.from_module(cleanup), name="cleanup")
+namespace.add_collection(Collection.from_module(test))
+namespace.add_collection(Collection.from_module(release))
+# -- DISABLED: namespace.add_collection(Collection.from_module(docs))
+namespace.configure({
+    "tasks": {
+        "auto_dash_names": False
+    }
+})
+
+# -- ENSURE: python cleanup is used for this project.
+cleanup.cleanup_tasks.add_task(cleanup.clean_python)
+
+# -- INJECT: clean configuration into this namespace
+namespace.configure(cleanup.namespace.configuration())
+if sys.platform.startswith("win"):
+    # -- OVERRIDE SETTINGS: For platform=win32, ... (Windows)
+    # NOTE: pty=False and cmd.exe as shell for Windows command execution.
+    from ._compat_shutil import which
+    run_settings = dict(echo=True, pty=False, shell=which("cmd"))
+    namespace.configure({"run": run_settings})
+else:
+    namespace.configure({"run": dict(echo=True, pty=True)})
diff --git a/tasks/__main__.py b/tasks/__main__.py
new file mode 100644
index 0000000..637d841
--- /dev/null
+++ b/tasks/__main__.py
@@ -0,0 +1,70 @@
+# -*- coding: UTF-8 -*-
+"""
+Provides "invoke" script when invoke is not installed.
+Note that this approach uses the "tasks/_vendor/invoke.zip" bundle package.
+
+Usage::
+
+    # -- INSTEAD OF: invoke command
+    # Show invoke version
+    python -m tasks --version
+
+    # List all tasks
+    python -m tasks -l
+
+.. seealso::
+
+    * http://pyinvoke.org
+    * https://github.com/pyinvoke/invoke
+
+
+Examples for Invoke Scripts using the Bundle
+-------------------------------------------------------------------------------
+
+For UNIX like platforms:
+
+.. code-block:: sh
+
+    #!/bin/sh
+    #!/bin/bash
+    # RUN INVOKE: From bundled ZIP file (with Bourne shell/bash script).
+    # FILE: invoke.sh (in directory that contains tasks/ directory)
+
+    HERE=$(dirname $0)
+    export INVOKE_TASKS_USE_VENDOR_BUNDLES="yes"
+
+    python ${HERE}/tasks/_vendor/invoke.zip $*
+
+
+For Windows platform:
+
+.. code-block:: bat
+
+    @echo off
+    REM RUN INVOKE: From bundled ZIP file (with Windows Batchfile).
+    REM FILE: invoke.cmd (in directory that contains tasks/ directory)
+
+    setlocal
+    set HERE=%~dp0
+    set INVOKE_TASKS_USE_VENDOR_BUNDLES="yes"
+    if not defined PYTHON   set PYTHON=python
+
+    %PYTHON% %HERE%tasks/_vendor/invoke.zip "%*"
+"""
+
+from __future__ import absolute_import
+import os
+import sys
+
+# -----------------------------------------------------------------------------
+# BOOTSTRAP PATH: Use provided vendor bundle if "invoke" is not installed
+# -----------------------------------------------------------------------------
+# NOTE: tasks/__init__.py performs sys.path setup.
+# -- FORCE: Vendor-bundle usage before the tasks package is imported.
+os.environ["INVOKE_TASKS_USE_VENDOR_BUNDLES"] = "yes"
+
+# -----------------------------------------------------------------------------
+# AUTO-MAIN:
+# -----------------------------------------------------------------------------
+if __name__ == "__main__":
+    # -- DELEGATE: To the invoke command-line program; exit with its status.
+    from invoke.main import program
+    sys.exit(program.run())
diff --git a/tasks/_compat_shutil.py b/tasks/_compat_shutil.py
new file mode 100644
index 0000000..69f3498
--- /dev/null
+++ b/tasks/_compat_shutil.py
@@ -0,0 +1,8 @@
+# -*- coding: UTF-8 -*-
+# pylint: disable=unused-import
+# PYTHON VERSION COMPATIBILITY HELPER
+
+try:
+    from shutil import which    # -- SINCE: Python 3.3
+except ImportError:
+    # -- FALLBACK: Older Pythons need the "backports.shutil_which" package.
+    from backports.shutil_which import which
diff --git a/tasks/_dry_run.py b/tasks/_dry_run.py
new file mode 100644
index 0000000..cedbdd4
--- /dev/null
+++ b/tasks/_dry_run.py
@@ -0,0 +1,44 @@
+# -*- coding: UTF-8 -*-
+"""
+Basic support to use a --dry-run mode w/ invoke tasks.
+
+.. code-block::
+
+    from ._dry_run import DryRunContext
+
+    @task
+    def destroy_something(ctx, path, dry_run=False):
+        if dry_run:
+            ctx = DryRunContext(ctx)
+
+        # -- DRY-RUN MODE: Only echos commands.
+        ctx.run("rm -rf {}".format(path))
+"""
+
+from __future__ import print_function
+
+class DryRunContext(object):
+    """Context-like object that only echoes commands instead of running them.
+
+    Mimics the subset of the invoke Context API used here (run/sudo).
+    """
+    PREFIX = "DRY-RUN: "
+    SCHEMA = "{prefix}{command}"
+    SCHEMA_WITH_KWARGS = "{prefix}{command} (with kwargs={kwargs})"
+
+    def __init__(self, ctx=None, prefix=None, schema=None):
+        # :param ctx:    Wrapped invoke context (not used by run()/sudo()).
+        # :param prefix: Message prefix (default: PREFIX).
+        # :param schema: Message format string (default: SCHEMA).
+        if prefix is None:
+            prefix = self.PREFIX
+        if schema is None:
+            schema = self.SCHEMA
+
+        self.ctx = ctx
+        self.prefix = prefix
+        self.schema = schema
+
+    def run(self, command, **kwargs):
+        # -- DRY-RUN: Print the command instead of executing it.
+        message = self.schema.format(command=command,
+                                     prefix=self.prefix,
+                                     kwargs=kwargs)
+        print(message)
+
+
+    def sudo(self, command, **kwargs):
+        # -- DRY-RUN: Same as run(), with "sudo " prepended to the command.
+        command2 = "sudo %s" % command
+        self.run(command2, **kwargs)
diff --git a/tasks/_setup.py b/tasks/_setup.py
new file mode 100644
index 0000000..9694a68
--- /dev/null
+++ b/tasks/_setup.py
@@ -0,0 +1,143 @@
+# -*- coding: utf-8 -*-
+"""
+Decides if vendor bundles are used or not.
+Setup python path accordingly.
+"""
+
+from __future__ import absolute_import, print_function
+import os.path
+import sys
+
+# -----------------------------------------------------------------------------
+# DEFINES:
+# -----------------------------------------------------------------------------
+HERE = os.path.dirname(__file__)
+TASKS_VENDOR_DIR = os.path.join(HERE, "_vendor")
+INVOKE_BUNDLE = os.path.join(TASKS_VENDOR_DIR, "invoke.zip")
+INVOKE_BUNDLE_VERSION = "1.2.0"
+
+# -- DIAGNOSTICS: Set True to dump sys.path at the end of setup_path().
+DEBUG_SYSPATH = False
+
+
+# -----------------------------------------------------------------------------
+# EXCEPTIONS:
+# -----------------------------------------------------------------------------
+class VersionRequirementError(SystemExit):
+    # -- NOTE: Derived from SystemExit so an unmet requirement aborts the run.
+    pass
+
+
+# -----------------------------------------------------------------------------
+# FUNCTIONS:
+# -----------------------------------------------------------------------------
+def setup_path(invoke_minversion=None):
+    """Setup python search and add ``TASKS_VENDOR_DIR`` (if available).
+
+    :param invoke_minversion: Minimal invoke version needed (string or None).
+    """
+    # print("INVOKE.tasks: setup_path")
+    if not os.path.isdir(TASKS_VENDOR_DIR):
+        print("SKIP: TASKS_VENDOR_DIR=%s is missing" % TASKS_VENDOR_DIR)
+        return
+    elif os.path.abspath(TASKS_VENDOR_DIR) in sys.path:
+        # -- SETUP ALREADY DONE:
+        # return
+        pass
+
+    # -- DECIDE: Environment override, or forced when bundles are needed.
+    use_vendor_bundles = os.environ.get("INVOKE_TASKS_USE_VENDOR_BUNDLES", "no")
+    if need_vendor_bundles(invoke_minversion):
+        use_vendor_bundles = "yes"
+
+    if use_vendor_bundles == "yes":
+        syspath_insert(0, os.path.abspath(TASKS_VENDOR_DIR))
+        if setup_path_for_bundle(INVOKE_BUNDLE, pos=1):
+            import invoke
+            bundle_path = os.path.relpath(INVOKE_BUNDLE, os.getcwd())
+            print("USING: %s (version: %s)" % (bundle_path, invoke.__version__))
+    else:
+        # -- BEST-EFFORT: May rescue something
+        syspath_append(os.path.abspath(TASKS_VENDOR_DIR))
+        setup_path_for_bundle(INVOKE_BUNDLE, pos=len(sys.path))
+
+    if DEBUG_SYSPATH:
+        for index, p in enumerate(sys.path):
+            print("  %d.  %s" % (index, p))
+
+
+def require_invoke_minversion(min_version, verbose=False):
+    """Ensures that :mod:`invoke` has at the least the :param:`min_version`.
+    Otherwise,
+
+    :param min_version: Minimal acceptable invoke version (as string).
+    :param verbose:     Indicates if invoke.version should be shown.
+    :raises: VersionRequirementError=SystemExit if requirement fails.
+    """
+    # -- REQUIRES: sys.path is setup and contains invoke
+    try:
+        import invoke
+        invoke_version = invoke.__version__
+    except ImportError:
+        invoke_version = "__NOT_INSTALLED"
+
+    # -- NOTE(review): Lexicographic string comparison; misorders versions
+    #    like "1.10.0" < "1.2.0". Confirm acceptable for expected versions.
+    if invoke_version < min_version:
+        message = "REQUIRE: invoke.version >= %s (but was: %s)" % \
+                  (min_version, invoke_version)
+        message += "\nUSE: pip install invoke>=%s" % min_version
+        raise VersionRequirementError(message)
+
+    # pylint: disable=invalid-name
+    INVOKE_VERSION = os.environ.get("INVOKE_VERSION", None)
+    if verbose and not INVOKE_VERSION:
+        os.environ["INVOKE_VERSION"] = invoke_version
+        print("USING: invoke.version=%s" % invoke_version)
+
+
+def need_vendor_bundles(invoke_minversion=None):
+    """Check if any vendor bundle is needed (invoke and/or path module).
+
+    :param invoke_minversion: Minimal invoke version (string or None).
+    :return: True if at least one bundle is needed (as bool).
+    """
+    invoke_minversion = invoke_minversion or "0.0.0"
+    need_vendor_answers = []
+    need_vendor_answers.append(need_vendor_bundle_invoke(invoke_minversion))
+    # -- REQUIRE: path.py
+    try:
+        import path
+        need_bundle = False
+    except ImportError:
+        need_bundle = True
+    need_vendor_answers.append(need_bundle)
+
+    # -- DIAG: print("INVOKE: need_bundle=%s" % need_bundle1)
+    # return need_bundle1 or need_bundle2
+    return any(need_vendor_answers)
+
+
+def need_vendor_bundle_invoke(invoke_minversion="0.0.0"):
+    """Check if the bundled invoke is needed (missing, too old, or broken).
+
+    NOTE(review): Version check uses lexicographic string comparison.
+    """
+    # -- REQUIRE: invoke
+    try:
+        import invoke
+        need_bundle = invoke.__version__ < invoke_minversion
+        if need_bundle:
+            # -- UNLOAD: Too-old module, so the bundle can be imported later.
+            del sys.modules["invoke"]
+            del invoke
+    except ImportError:
+        need_bundle = True
+    except Exception:   # pylint: disable=broad-except
+        need_bundle = True
+    return need_bundle
+
+
+# -----------------------------------------------------------------------------
+# UTILITY FUNCTIONS:
+# -----------------------------------------------------------------------------
+def setup_path_for_bundle(bundle_path, pos=0):
+    """Insert a bundle (e.g. ZIP file) into sys.path, if it exists.
+
+    :return: True if the bundle was added (as bool).
+    """
+    if os.path.exists(bundle_path):
+        syspath_insert(pos, os.path.abspath(bundle_path))
+        return True
+    return False
+
+
+def syspath_insert(pos, path):
+    # -- ENSURE: No duplicates; an existing entry is moved to position "pos".
+    if path in sys.path:
+        sys.path.remove(path)
+    sys.path.insert(pos, path)
+
+
+def syspath_append(path):
+    # -- ENSURE: No duplicates; an existing entry is moved to the end.
+    if path in sys.path:
+        sys.path.remove(path)
+    sys.path.append(path)
+
diff --git a/tasks/_tasklet_cleanup.py b/tasks/_tasklet_cleanup.py
new file mode 100644
index 0000000..2999bc6
--- /dev/null
+++ b/tasks/_tasklet_cleanup.py
@@ -0,0 +1,295 @@
+# -*- coding: UTF-8 -*-
+"""
+Provides cleanup tasks for invoke build scripts (as generic invoke tasklet).
+Simplifies writing common, composable and extendable cleanup tasks.
+
+PYTHON PACKAGE REQUIREMENTS:
+* path.py >= 8.2.1  (as path-object abstraction)
+* pathlib (for ant-like wildcard patterns; since: python > 3.5)
+* pycmd (required-by: clean_python())
+
+clean task: Add Additional Directories and Files to be removed
+-------------------------------------------------------------------------------
+
+Create an invoke configuration file (YAML or JSON) with the additional
+configuration data:
+
+.. code-block:: yaml
+
+    # -- FILE: invoke.yaml
+    # USE: clean.directories, clean.files to override current configuration.
+    clean:
+        extra_directories:
+            - "**/tmp/"
+        extra_files:
+            - "**/*.log"
+            - "**/*.bak"
+
+
+Registration of Cleanup Tasks
+------------------------------
+
+Other task modules often have an own cleanup task to recover the clean state.
+The :meth:`clean` task, that is provided here, supports the registration
+of additional cleanup tasks. Therefore, when the :meth:`clean` task is executed,
+all registered cleanup tasks will be executed.
+
+EXAMPLE::
+
+    # -- FILE: tasks/docs.py
+    from __future__ import absolute_import
+    from invoke import task, Collection
+    from tasklet_cleanup import cleanup_tasks, cleanup_dirs
+
+    @task
+    def clean(ctx, dry_run=False):
+        "Cleanup generated documentation artifacts."
+        cleanup_dirs(["build/docs"])
+
+    namespace = Collection(clean)
+    ...
+
+    # -- REGISTER CLEANUP TASK:
+    cleanup_tasks.add_task(clean, "clean_docs")
+    cleanup_tasks.configure(namespace.configuration())
+"""
+
+from __future__ import absolute_import, print_function
+import os.path
+import sys
+import pathlib
+from invoke import task, Collection
+from invoke.executor import Executor
+from invoke.exceptions import Exit, Failure, UnexpectedExit
+from path import Path
+
+
+# -----------------------------------------------------------------------------
+# CLEANUP UTILITIES:
+# -----------------------------------------------------------------------------
+def cleanup_accept_old_config(ctx):
+    """Merge old-style config sections ("clean", "clean_all") into the
+    new-style sections ("cleanup", "cleanup_all").
+
+    :param ctx: Invoke context object (with configuration).
+    """
+    ctx.cleanup.directories.extend(ctx.clean.directories or [])
+    ctx.cleanup.extra_directories.extend(ctx.clean.extra_directories or [])
+    ctx.cleanup.files.extend(ctx.clean.files or [])
+    ctx.cleanup.extra_files.extend(ctx.clean.extra_files or [])
+
+    ctx.cleanup_all.directories.extend(ctx.clean_all.directories or [])
+    ctx.cleanup_all.extra_directories.extend(ctx.clean_all.extra_directories or [])
+    ctx.cleanup_all.files.extend(ctx.clean_all.files or [])
+    ctx.cleanup_all.extra_files.extend(ctx.clean_all.extra_files or [])
+
+
+def execute_cleanup_tasks(ctx, cleanup_tasks, dry_run=False):
+    """Execute several cleanup tasks as part of the cleanup.
+
+    REQUIRES: ``clean(ctx, dry_run=False)`` signature in cleanup tasks.
+
+    :param ctx:             Context object for the tasks.
+    :param cleanup_tasks:   Collection of cleanup tasks (as Collection).
+    :param dry_run:         Indicates dry-run mode (bool)
+    """
+    # pylint: disable=redefined-outer-name
+    executor = Executor(cleanup_tasks, ctx.config)
+    failure_count = 0
+    for cleanup_task in cleanup_tasks.tasks:
+        try:
+            print("CLEANUP TASK: %s" % cleanup_task)
+            executor.execute((cleanup_task, dict(dry_run=dry_run)))
+        except (Exit, Failure, UnexpectedExit) as e:
+            # -- GRACEFUL: One failing cleanup task does not abort the others;
+            # failures are only counted and reported below.
+            print("FAILURE in CLEANUP TASK: %s (GRACEFULLY-IGNORED)" % cleanup_task)
+            failure_count += 1
+
+    if failure_count:
+        print("CLEANUP TASKS: %d failure(s) occured" % failure_count)
+
+
+def cleanup_dirs(patterns, dry_run=False, workdir="."):
+    """Remove directories (and their contents) recursively.
+    Skips removal if directories does not exist.
+
+    :param patterns:    Directory name patterns, like "**/tmp*" (as list).
+    :param dry_run:     Dry-run mode indicator (as bool).
+    :param workdir:     Current work directory (default=".")
+    """
+    current_dir = Path(workdir)
+    # -- PROTECTED: Base directory of the currently running python/virtualenv.
+    python_basedir = Path(Path(sys.executable).dirname()).joinpath("..").abspath()
+    warn2_counter = 0
+    for dir_pattern in patterns:
+        for directory in path_glob(dir_pattern, current_dir):
+            directory2 = directory.abspath()
+            if sys.executable.startswith(directory2):
+                # pylint: disable=line-too-long
+                print("SKIP-SUICIDE: '%s' contains current python executable" % directory)
+                continue
+            elif directory2.startswith(python_basedir):
+                # -- PROTECT CURRENTLY USED VIRTUAL ENVIRONMENT:
+                # Print at most 5 warnings to avoid flooding the output.
+                if warn2_counter <= 4:
+                    print("SKIP-SUICIDE: '%s'" % directory)
+                warn2_counter += 1
+                continue
+
+            if not directory.isdir():
+                print("RMTREE: %s (SKIPPED: Not a directory)" % directory)
+                continue
+
+            if dry_run:
+                print("RMTREE: %s (dry-run)" % directory)
+            else:
+                print("RMTREE: %s" % directory)
+                # HINT: rmtree_p() is the error-tolerant variant (path.py API).
+                directory.rmtree_p()
+
+
+def cleanup_files(patterns, dry_run=False, workdir="."):
+    """Remove files or files selected by file patterns.
+    Skips removal if file does not exist.
+
+    :param patterns:    File patterns, like "**/*.pyc" (as list).
+    :param dry_run:     Dry-run mode indicator (as bool).
+    :param workdir:     Current work directory (default=".")
+    """
+    current_dir = Path(workdir)
+    # -- PROTECTED: Base directory of the currently running python/virtualenv.
+    python_basedir = Path(Path(sys.executable).dirname()).joinpath("..").abspath()
+    error_message = None
+    error_count = 0
+    for file_pattern in patterns:
+        for file_ in path_glob(file_pattern, current_dir):
+            if file_.abspath().startswith(python_basedir):
+                # -- PROTECT CURRENTLY USED VIRTUAL ENVIRONMENT:
+                continue
+            if not file_.isfile():
+                print("REMOVE: %s (SKIPPED: Not a file)" % file_)
+                continue
+
+            if dry_run:
+                print("REMOVE: %s (dry-run)" % file_)
+            else:
+                print("REMOVE: %s" % file_)
+                try:
+                    file_.remove_p()
+                except os.error as e:
+                    message = "%s: %s" % (e.__class__.__name__, e)
+                    print(message + " basedir: "+ python_basedir)
+                    error_count += 1
+                    if not error_message:
+                        error_message = message
+    # -- DISABLED: Error propagation is intentionally switched off
+    # ("if False"); removal errors are only counted and printed above.
+    if False and error_message:
+        class CleanupError(RuntimeError):
+            pass
+        raise CleanupError(error_message)
+
+
+def path_glob(pattern, current_dir=None):
+    """Use pathlib for ant-like patterns, like: "**/*.py"
+
+    :param pattern:      File/directory pattern to use (as string).
+    :param current_dir:  Current working directory (as Path, pathlib.Path, str)
+    :return: Generator of resolved paths (as path.Path objects).
+    """
+    if not current_dir:
+        current_dir = pathlib.Path.cwd()
+    elif not isinstance(current_dir, pathlib.Path):
+        # -- CASE: string, path.Path (string-like)
+        current_dir = pathlib.Path(str(current_dir))
+
+    # -- CONVERT: pathlib.Path results back into path.Path objects.
+    for p in current_dir.glob(pattern):
+        yield Path(str(p))
+
+
+# -----------------------------------------------------------------------------
+# GENERIC CLEANUP TASKS:
+# -----------------------------------------------------------------------------
+@task
+def clean(ctx, dry_run=False):
+    """Cleanup temporary dirs/files to regain a clean state."""
+    # -- MERGE: old-style config ("clean") into new-style config ("cleanup").
+    cleanup_accept_old_config(ctx)
+    directories = ctx.cleanup.directories or []
+    directories.extend(ctx.cleanup.extra_directories or [])
+    files = ctx.cleanup.files or []
+    files.extend(ctx.cleanup.extra_files or [])
+
+    # -- PERFORM CLEANUP:
+    # First run the registered cleanup tasks, then remove dirs and files.
+    execute_cleanup_tasks(ctx, cleanup_tasks, dry_run=dry_run)
+    cleanup_dirs(directories, dry_run=dry_run)
+    cleanup_files(files, dry_run=dry_run)
+
+
+@task(name="all", aliases=("distclean",))
+def clean_all(ctx, dry_run=False):
+    """Clean up everything, even the precious stuff.
+    NOTE: The normal "clean" task is executed at the end (see below).
+    """
+    cleanup_accept_old_config(ctx)
+    directories = ctx.config.cleanup_all.directories or []
+    directories.extend(ctx.config.cleanup_all.extra_directories or [])
+    files = ctx.config.cleanup_all.files or []
+    files.extend(ctx.config.cleanup_all.extra_files or [])
+
+    # -- PERFORM CLEANUP:
+    # HINT: Remove now directories, files first before cleanup-tasks.
+    cleanup_dirs(directories, dry_run=dry_run)
+    cleanup_files(files, dry_run=dry_run)
+    execute_cleanup_tasks(ctx, cleanup_all_tasks, dry_run=dry_run)
+    # -- FINALLY: Delegate to the normal "clean" task, too.
+    clean(ctx, dry_run=dry_run)
+
+
+@task(name="python")
+def clean_python(ctx, dry_run=False):
+    """Cleanup python related files/dirs: *.pyc, *.pyo, ..."""
+    # MAYBE NOT: "**/__pycache__"
+    cleanup_dirs(["build", "dist", "*.egg-info", "**/__pycache__"],
+                 dry_run=dry_run)
+    if not dry_run:
+        # -- REQUIRES: pycmd package (provides the "py.cleanup" command).
+        ctx.run("py.cleanup")
+    cleanup_files(["**/*.pyc", "**/*.pyo", "**/*$py.class"], dry_run=dry_run)
+
+
+# -----------------------------------------------------------------------------
+# TASK CONFIGURATION:
+# -----------------------------------------------------------------------------
+# -- CONFIG SCHEMA: Empty cleanup configuration (directories/files + extras).
+CLEANUP_EMPTY_CONFIG = {
+    "directories": [],
+    "files": [],
+    "extra_directories": [],
+    "extra_files": [],
+}
+def make_cleanup_config(**kwargs):
+    """Create a cleanup config dict from the empty schema plus overrides.
+
+    NOTE(review): dict.copy() is shallow -- keys not overridden via kwargs
+    still alias the list objects inside CLEANUP_EMPTY_CONFIG; verify that
+    no caller mutates these shared lists in place.
+    """
+    config_data = CLEANUP_EMPTY_CONFIG.copy()
+    config_data.update(kwargs)
+    return config_data
+
+
+# -- TASK NAMESPACE: "clean" is the default task of this collection.
+namespace = Collection(clean_all, clean_python)
+namespace.add_task(clean, default=True)
+namespace.configure({
+    "cleanup": make_cleanup_config(
+        files=["*.bak", "*.log", "*.tmp", "**/.DS_Store", "**/*.~*~"]
+    ),
+    "cleanup_all": make_cleanup_config(
+        directories=[".venv*", ".tox", "downloads", "tmp"]
+    ),
+    # -- BACKWARD-COMPATIBLE: OLD-STYLE
+    # NOTE(review): CLEANUP_EMPTY_CONFIG.copy() is shallow, so "clean" and
+    # "clean_all" share their list values with CLEANUP_EMPTY_CONFIG; the
+    # config_add_cleanup_dirs/files() helpers below extend these shared
+    # lists in place -- confirm this aliasing is intended.
+    "clean":     CLEANUP_EMPTY_CONFIG.copy(),
+    "clean_all": CLEANUP_EMPTY_CONFIG.copy(),
+})
+
+
+# -- EXTENSION-POINT: CLEANUP TASKS (called by: clean, clean_all task)
+# NOTE: Can be used by other tasklets to register cleanup tasks.
+cleanup_tasks = Collection("cleanup_tasks")
+cleanup_all_tasks = Collection("cleanup_all_tasks")
+
+# -- EXTEND NORMAL CLEANUP-TASKS:
+# DISABLED: cleanup_tasks.add_task(clean_python)
+#
+# -----------------------------------------------------------------------------
+# EXTENSION-POINT: CONFIGURATION HELPERS: Can be used from other task modules
+# -----------------------------------------------------------------------------
+def config_add_cleanup_dirs(directories):
+    """Register additional directories for the "clean" task (old-style config)."""
+    # pylint: disable=protected-access
+    the_cleanup_directories = namespace._configuration["clean"]["directories"]
+    the_cleanup_directories.extend(directories)
+
+def config_add_cleanup_files(files):
+    """Register additional file patterns for the "clean" task (old-style config)."""
+    # pylint: disable=protected-access
+    the_cleanup_files = namespace._configuration["clean"]["files"]
+    the_cleanup_files.extend(files)
diff --git a/tasks/_vendor/README.rst b/tasks/_vendor/README.rst
new file mode 100644
index 0000000..68fc06a
--- /dev/null
+++ b/tasks/_vendor/README.rst
@@ -0,0 +1,35 @@
+tasks/_vendor: Bundled vendor parts -- needed by tasks
+===============================================================================
+
+This directory contains bundled archives that may be needed to run the tasks.
+Especially, it contains an executable "invoke.zip" archive.
+This archive can be used when invoke is not installed.
+
+To execute invoke from the bundled ZIP archive::
+
+
+    python tasks/_vendor/invoke.zip --help
+    python tasks/_vendor/invoke.zip --version
+
+
+Example for a local "bin/invoke" script in a UNIX like platform environment::
+
+    #!/bin/bash
+    # RUN INVOKE: From bundled ZIP file.
+
+    HERE=$(dirname $0)
+
+    python ${HERE}/../tasks/_vendor/invoke.zip $*
+
+Example for a local "bin/invoke.cmd" script in a Windows environment::
+
+    @echo off
+    REM ==========================================================================
+    REM RUN INVOKE: From bundled ZIP file.
+    REM ==========================================================================
+
+    setlocal
+    set HERE=%~dp0
+    if not defined PYTHON   set PYTHON=python
+
+    %PYTHON% %HERE%../tasks/_vendor/invoke.zip "%*"
diff --git a/tasks/_vendor/invoke.zip b/tasks/_vendor/invoke.zip
new file mode 100644
index 0000000..bd19412
--- /dev/null
+++ b/tasks/_vendor/invoke.zip
Binary files differ
diff --git a/tasks/_vendor/path.py b/tasks/_vendor/path.py
new file mode 100644
index 0000000..2c7a71c
--- /dev/null
+++ b/tasks/_vendor/path.py
@@ -0,0 +1,1725 @@
+#
+# SOURCE: https://pypi.python.org/pypi/path.py
+# VERSION: 8.2.1
+# -----------------------------------------------------------------------------
+# Copyright (c) 2010 Mikhail Gusarov
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+#
+
+"""
+path.py - An object representing a path to a file or directory.
+
+https://github.com/jaraco/path.py
+
+Example::
+
+    from path import Path
+    d = Path('/home/guido/bin')
+    for f in d.files('*.py'):
+        f.chmod(0o755)
+"""
+
+from __future__ import unicode_literals
+
+import sys
+import warnings
+import os
+import fnmatch
+import glob
+import shutil
+import codecs
+import hashlib
+import errno
+import tempfile
+import functools
+import operator
+import re
+import contextlib
+import io
+from distutils import dir_util
+import importlib
+
+try:
+    import win32security
+except ImportError:
+    pass
+
+try:
+    import pwd
+except ImportError:
+    pass
+
+try:
+    import grp
+except ImportError:
+    pass
+
+##############################################################################
+# Python 2/3 support
+PY3 = sys.version_info >= (3,)
+PY2 = not PY3
+
+# -- DEFAULTS: Python 3 aliases; overridden in the PY2 branch further below.
+string_types = str,
+text_type = str
+getcwdu = os.getcwd
+
+def surrogate_escape(error):
+    """
+    Simulate the Python 3 ``surrogateescape`` handler, but for Python 2 only.
+
+    Registered as a codecs error handler in the PY2 branch below, where
+    ``__builtin__`` is imported before this handler can be invoked.
+    """
+    chars = error.object[error.start:error.end]
+    assert len(chars) == 1
+    val = ord(chars)
+    # -- MAP: byte value into the low surrogate range (0xdc00 offset).
+    val += 0xdc00
+    return __builtin__.unichr(val), error.end
+
+if PY2:
+    # -- PYTHON2 OVERRIDES: string/unicode aliases + surrogateescape handler.
+    import __builtin__
+    string_types = __builtin__.basestring,
+    text_type = __builtin__.unicode
+    getcwdu = os.getcwdu
+    codecs.register_error('surrogateescape', surrogate_escape)
+
+@contextlib.contextmanager
+def io_error_compat():
+    """Context manager: re-raise IOError as OSError (Python 2 compat)."""
+    try:
+        yield
+    except IOError as io_err:
+        # On Python 2, io.open raises IOError; transform to OSError for
+        # future compatibility.
+        os_err = OSError(*io_err.args)
+        os_err.filename = getattr(io_err, 'filename', None)
+        raise os_err
+
+##############################################################################
+
+__all__ = ['Path', 'CaseInsensitivePattern']
+
+
+# -- LINE SEPARATORS: byte/unicode newline conventions and matching regexes.
+LINESEPS = ['\r\n', '\r', '\n']
+U_LINESEPS = LINESEPS + ['\u0085', '\u2028', '\u2029']
+NEWLINE = re.compile('|'.join(LINESEPS))
+U_NEWLINE = re.compile('|'.join(U_LINESEPS))
+# -- Regexes matching a single trailing newline at end-of-string.
+NL_END = re.compile(r'(?:{0})$'.format(NEWLINE.pattern))
+U_NL_END = re.compile(r'(?:{0})$'.format(U_NEWLINE.pattern))
+
+
+# -- VERSION: taken from the installed path.py distribution, if available.
+try:
+    import pkg_resources
+    __version__ = pkg_resources.require('path.py')[0].version
+except Exception:
+    __version__ = '8.2.1'   # XXX-MODIFIED-WAS: 'unknown'
+
+
+class TreeWalkWarning(Warning):
+    """Warning category used by the directory-walk methods (errors='warn')."""
+    pass
+
+
+# from jaraco.functools
+def compose(*funcs):
+    """Compose functions right-to-left: compose(f, g)(x) == f(g(x))."""
+    compose_two = lambda f1, f2: lambda *args, **kwargs: f1(f2(*args, **kwargs))
+    return functools.reduce(compose_two, funcs)
+
+
+def simple_cache(func):
+    """
+    Save results for the :meth:'path.using_module' classmethod.
+    When Python 3.2 is available, use functools.lru_cache instead.
+    """
+    saved_results = {}
+
+    def wrapper(cls, module):
+        # -- MEMOIZE: one cached result per module (unbounded cache).
+        if module in saved_results:
+            return saved_results[module]
+        saved_results[module] = func(cls, module)
+        return saved_results[module]
+    return wrapper
+
+
+class ClassProperty(property):
+    """Descriptor: property whose getter is invoked on the owner class."""
+    def __get__(self, cls, owner):
+        return self.fget.__get__(None, owner)()
+
+
+class multimethod(object):
+    """
+    Acts like a classmethod when invoked from the class and like an
+    instancemethod when invoked from the instance.
+    """
+    def __init__(self, func):
+        self.func = func
+
+    def __get__(self, instance, owner):
+        # -- DISPATCH: class access binds owner only; instance access binds
+        # both owner and instance as leading arguments.
+        return (
+            functools.partial(self.func, owner) if instance is None
+            else functools.partial(self.func, owner, instance)
+        )
+
+
+class Path(text_type):
+    """
+    Represents a filesystem path.
+
+    For documentation on individual methods, consult their
+    counterparts in :mod:`os.path`.
+
+    Some methods are additionally included from :mod:`shutil`.
+    The functions are linked directly into the class namespace
+    such that they will be bound to the Path instance. For example,
+    ``Path(src).copy(target)`` is equivalent to
+    ``shutil.copy(src, target)``. Therefore, when referencing
+    the docs for these methods, assume `src` references `self`,
+    the Path instance.
+    """
+
+    module = os.path
+    """ The path module to use for path operations.
+
+    .. seealso:: :mod:`os.path`
+    """
+
+    def __init__(self, other=''):
+        if other is None:
+            raise TypeError("Invalid initial value for path: None")
+
+    @classmethod
+    @simple_cache
+    def using_module(cls, module):
+        subclass_name = cls.__name__ + '_' + module.__name__
+        if PY2:
+            subclass_name = str(subclass_name)
+        bases = (cls,)
+        ns = {'module': module}
+        return type(subclass_name, bases, ns)
+
+    @ClassProperty
+    @classmethod
+    def _next_class(cls):
+        """
+        What class should be used to construct new instances from this class
+        """
+        return cls
+
+    @classmethod
+    def _always_unicode(cls, path):
+        """
+        Ensure the path as retrieved from a Python API, such as :func:`os.listdir`,
+        is a proper Unicode string.
+        """
+        if PY3 or isinstance(path, text_type):
+            return path
+        return path.decode(sys.getfilesystemencoding(), 'surrogateescape')
+
+    # --- Special Python methods.
+
+    def __repr__(self):
+        return '%s(%s)' % (type(self).__name__, super(Path, self).__repr__())
+
+    # Adding a Path and a string yields a Path.
+    def __add__(self, more):
+        try:
+            return self._next_class(super(Path, self).__add__(more))
+        except TypeError:  # Python bug
+            return NotImplemented
+
+    def __radd__(self, other):
+        if not isinstance(other, string_types):
+            return NotImplemented
+        return self._next_class(other.__add__(self))
+
+    # The / operator joins Paths.
+    def __div__(self, rel):
+        """ fp.__div__(rel) == fp / rel == fp.joinpath(rel)
+
+        Join two path components, adding a separator character if
+        needed.
+
+        .. seealso:: :func:`os.path.join`
+        """
+        return self._next_class(self.module.join(self, rel))
+
+    # Make the / operator work even when true division is enabled.
+    __truediv__ = __div__
+
+    # The / operator joins Paths the other way around
+    def __rdiv__(self, rel):
+        """ fp.__rdiv__(rel) == rel / fp
+
+        Join two path components, adding a separator character if
+        needed.
+
+        .. seealso:: :func:`os.path.join`
+        """
+        return self._next_class(self.module.join(rel, self))
+
+    # Make the / operator work even when true division is enabled.
+    __rtruediv__ = __rdiv__
+
+    def __enter__(self):
+        self._old_dir = self.getcwd()
+        os.chdir(self)
+        return self
+
+    def __exit__(self, *_):
+        os.chdir(self._old_dir)
+
+    @classmethod
+    def getcwd(cls):
+        """ Return the current working directory as a path object.
+
+        .. seealso:: :func:`os.getcwdu`
+        """
+        return cls(getcwdu())
+
+    #
+    # --- Operations on Path strings.
+
+    def abspath(self):
+        """ .. seealso:: :func:`os.path.abspath` """
+        return self._next_class(self.module.abspath(self))
+
+    def normcase(self):
+        """ .. seealso:: :func:`os.path.normcase` """
+        return self._next_class(self.module.normcase(self))
+
+    def normpath(self):
+        """ .. seealso:: :func:`os.path.normpath` """
+        return self._next_class(self.module.normpath(self))
+
+    def realpath(self):
+        """ .. seealso:: :func:`os.path.realpath` """
+        return self._next_class(self.module.realpath(self))
+
+    def expanduser(self):
+        """ .. seealso:: :func:`os.path.expanduser` """
+        return self._next_class(self.module.expanduser(self))
+
+    def expandvars(self):
+        """ .. seealso:: :func:`os.path.expandvars` """
+        return self._next_class(self.module.expandvars(self))
+
+    def dirname(self):
+        """ .. seealso:: :attr:`parent`, :func:`os.path.dirname` """
+        return self._next_class(self.module.dirname(self))
+
+    def basename(self):
+        """ .. seealso:: :attr:`name`, :func:`os.path.basename` """
+        return self._next_class(self.module.basename(self))
+
+    def expand(self):
+        """ Clean up a filename by calling :meth:`expandvars()`,
+        :meth:`expanduser()`, and :meth:`normpath()` on it.
+
+        This is commonly everything needed to clean up a filename
+        read from a configuration file, for example.
+        """
+        return self.expandvars().expanduser().normpath()
+
+    @property
+    def namebase(self):
+        """ The same as :meth:`name`, but with one file extension stripped off.
+
+        For example,
+        ``Path('/home/guido/python.tar.gz').name == 'python.tar.gz'``,
+        but
+        ``Path('/home/guido/python.tar.gz').namebase == 'python.tar'``.
+        """
+        base, ext = self.module.splitext(self.name)
+        return base
+
+    @property
+    def ext(self):
+        """ The file extension, for example ``'.py'``. """
+        f, ext = self.module.splitext(self)
+        return ext
+
+    @property
+    def drive(self):
+        """ The drive specifier, for example ``'C:'``.
+
+        This is always empty on systems that don't use drive specifiers.
+        """
+        drive, r = self.module.splitdrive(self)
+        return self._next_class(drive)
+
+    parent = property(
+        dirname, None, None,
+        """ This path's parent directory, as a new Path object.
+
+        For example,
+        ``Path('/usr/local/lib/libpython.so').parent ==
+        Path('/usr/local/lib')``
+
+        .. seealso:: :meth:`dirname`, :func:`os.path.dirname`
+        """)
+
+    name = property(
+        basename, None, None,
+        """ The name of this file or directory without the full path.
+
+        For example,
+        ``Path('/usr/local/lib/libpython.so').name == 'libpython.so'``
+
+        .. seealso:: :meth:`basename`, :func:`os.path.basename`
+        """)
+
+    def splitpath(self):
+        """ p.splitpath() -> Return ``(p.parent, p.name)``.
+
+        .. seealso:: :attr:`parent`, :attr:`name`, :func:`os.path.split`
+        """
+        parent, child = self.module.split(self)
+        return self._next_class(parent), child
+
+    def splitdrive(self):
+        """ p.splitdrive() -> Return ``(p.drive, <the rest of p>)``.
+
+        Split the drive specifier from this path.  If there is
+        no drive specifier, :samp:`{p.drive}` is empty, so the return value
+        is simply ``(Path(''), p)``.  This is always the case on Unix.
+
+        .. seealso:: :func:`os.path.splitdrive`
+        """
+        drive, rel = self.module.splitdrive(self)
+        return self._next_class(drive), rel
+
+    def splitext(self):
+        """ p.splitext() -> Return ``(p.stripext(), p.ext)``.
+
+        Split the filename extension from this path and return
+        the two parts.  Either part may be empty.
+
+        The extension is everything from ``'.'`` to the end of the
+        last path segment.  This has the property that if
+        ``(a, b) == p.splitext()``, then ``a + b == p``.
+
+        .. seealso:: :func:`os.path.splitext`
+        """
+        filename, ext = self.module.splitext(self)
+        return self._next_class(filename), ext
+
+    def stripext(self):
+        """ p.stripext() -> Remove one file extension from the path.
+
+        For example, ``Path('/home/guido/python.tar.gz').stripext()``
+        returns ``Path('/home/guido/python.tar')``.
+        """
+        return self.splitext()[0]
+
+    def splitunc(self):
+        """ .. seealso:: :func:`os.path.splitunc` """
+        unc, rest = self.module.splitunc(self)
+        return self._next_class(unc), rest
+
+    @property
+    def uncshare(self):
+        """
+        The UNC mount point for this path.
+        This is empty for paths on local drives.
+        """
+        unc, r = self.module.splitunc(self)
+        return self._next_class(unc)
+
+    @multimethod
+    def joinpath(cls, first, *others):
+        """
+        Join first to zero or more :class:`Path` components, adding a separator
+        character (:samp:`{first}.module.sep`) if needed.  Returns a new instance of
+        :samp:`{first}._next_class`.
+
+        .. seealso:: :func:`os.path.join`
+        """
+        if not isinstance(first, cls):
+            first = cls(first)
+        return first._next_class(first.module.join(first, *others))
+
+    def splitall(self):
+        r""" Return a list of the path components in this path.
+
+        The first item in the list will be a Path.  Its value will be
+        either :data:`os.curdir`, :data:`os.pardir`, empty, or the root
+        directory of this path (for example, ``'/'`` or ``'C:\\'``).  The
+        other items in the list will be strings.
+
+        ``path.Path.joinpath(*result)`` will yield the original path.
+        """
+        parts = []
+        loc = self
+        while loc != os.curdir and loc != os.pardir:
+            prev = loc
+            loc, child = prev.splitpath()
+            if loc == prev:
+                break
+            parts.append(child)
+        parts.append(loc)
+        parts.reverse()
+        return parts
+
+    def relpath(self, start='.'):
+        """ Return this path as a relative path,
+        based from `start`, which defaults to the current working directory.
+        """
+        cwd = self._next_class(start)
+        return cwd.relpathto(self)
+
+    def relpathto(self, dest):
+        """ Return a relative path from `self` to `dest`.
+
+        If there is no relative path from `self` to `dest`, for example if
+        they reside on different drives in Windows, then this returns
+        ``dest.abspath()``.
+        """
+        origin = self.abspath()
+        dest = self._next_class(dest).abspath()
+
+        orig_list = origin.normcase().splitall()
+        # Don't normcase dest!  We want to preserve the case.
+        dest_list = dest.splitall()
+
+        if orig_list[0] != self.module.normcase(dest_list[0]):
+            # Can't get here from there.
+            return dest
+
+        # Find the location where the two paths start to differ.
+        i = 0
+        for start_seg, dest_seg in zip(orig_list, dest_list):
+            if start_seg != self.module.normcase(dest_seg):
+                break
+            i += 1
+
+        # Now i is the point where the two paths diverge.
+        # Need a certain number of "os.pardir"s to work up
+        # from the origin to the point of divergence.
+        segments = [os.pardir] * (len(orig_list) - i)
+        # Need to add the diverging part of dest_list.
+        segments += dest_list[i:]
+        if len(segments) == 0:
+            # If they happen to be identical, use os.curdir.
+            relpath = os.curdir
+        else:
+            relpath = self.module.join(*segments)
+        return self._next_class(relpath)
+
+    # --- Listing, searching, walking, and matching
+
+    def listdir(self, pattern=None):
+        """ D.listdir() -> List of items in this directory.
+
+        Use :meth:`files` or :meth:`dirs` instead if you want a listing
+        of just files or just subdirectories.
+
+        The elements of the list are Path objects.
+
+        With the optional `pattern` argument, this only lists
+        items whose names match the given pattern.
+
+        .. seealso:: :meth:`files`, :meth:`dirs`
+        """
+        if pattern is None:
+            pattern = '*'
+        return [
+            self / child
+            for child in map(self._always_unicode, os.listdir(self))
+            if self._next_class(child).fnmatch(pattern)
+        ]
+
+    def dirs(self, pattern=None):
+        """ D.dirs() -> List of this directory's subdirectories.
+
+        The elements of the list are Path objects.
+        This does not walk recursively into subdirectories
+        (but see :meth:`walkdirs`).
+
+        With the optional `pattern` argument, this only lists
+        directories whose names match the given pattern.  For
+        example, ``d.dirs('build-*')``.
+        """
+        return [p for p in self.listdir(pattern) if p.isdir()]
+
+    def files(self, pattern=None):
+        """ D.files() -> List of the files in this directory.
+
+        The elements of the list are Path objects.
+        This does not walk into subdirectories (see :meth:`walkfiles`).
+
+        With the optional `pattern` argument, this only lists files
+        whose names match the given pattern.  For example,
+        ``d.files('*.pyc')``.
+        """
+
+        return [p for p in self.listdir(pattern) if p.isfile()]
+
+    def walk(self, pattern=None, errors='strict'):
+        """ D.walk() -> iterator over files and subdirs, recursively.
+
+        The iterator yields Path objects naming each child item of
+        this directory and its descendants.  This requires that
+        ``D.isdir()``.
+
+        This performs a depth-first traversal of the directory tree.
+        Each directory is returned just before all its children.
+
+        The `errors=` keyword argument controls behavior when an
+        error occurs.  The default is ``'strict'``, which causes an
+        exception.  Other allowed values are ``'warn'`` (which
+        reports the error via :func:`warnings.warn()`), and ``'ignore'``.
+        `errors` may also be an arbitrary callable taking a msg parameter.
+        """
+        # Error-policy dispatch table.  NB: the handlers are plain
+        # functions (no ``self``); they are looked up via
+        # ``vars(Handlers)`` and invoked as ``errors(msg)``.
+        class Handlers:
+            def strict(msg):
+                # Bare ``raise`` re-raises the exception currently being
+                # handled (this is only called from inside an except block).
+                raise
+
+            def warn(msg):
+                warnings.warn(msg, TreeWalkWarning)
+
+            def ignore(msg):
+                pass
+
+        if not callable(errors) and errors not in vars(Handlers):
+            raise ValueError("invalid errors parameter")
+        # Map a policy name to its handler; an arbitrary callable
+        # passes through unchanged.
+        errors = vars(Handlers).get(errors, errors)
+
+        try:
+            childList = self.listdir()
+        except Exception:
+            exc = sys.exc_info()[1]
+            tmpl = "Unable to list directory '%(self)s': %(exc)s"
+            msg = tmpl % locals()
+            errors(msg)
+            return
+
+        for child in childList:
+            if pattern is None or child.fnmatch(pattern):
+                yield child
+            try:
+                isdir = child.isdir()
+            except Exception:
+                exc = sys.exc_info()[1]
+                tmpl = "Unable to access '%(child)s': %(exc)s"
+                msg = tmpl % locals()
+                errors(msg)
+                # Treat an unreadable entry as a non-directory and move on.
+                isdir = False
+
+            if isdir:
+                for item in child.walk(pattern, errors):
+                    yield item
+
+    def walkdirs(self, pattern=None, errors='strict'):
+        """ D.walkdirs() -> iterator over subdirs, recursively.
+
+        With the optional `pattern` argument, this yields only
+        directories whose names match the given pattern.  For
+        example, ``mydir.walkdirs('*test')`` yields only directories
+        with names ending in ``'test'``.
+
+        The `errors=` keyword argument controls behavior when an
+        error occurs.  The default is ``'strict'``, which causes an
+        exception.  The other allowed values are ``'warn'`` (which
+        reports the error via :func:`warnings.warn()`), and ``'ignore'``.
+        """
+        if errors not in ('strict', 'warn', 'ignore'):
+            raise ValueError("invalid errors parameter")
+
+        try:
+            dirs = self.dirs()
+        except Exception:
+            if errors == 'ignore':
+                return
+            elif errors == 'warn':
+                warnings.warn(
+                    "Unable to list directory '%s': %s"
+                    % (self, sys.exc_info()[1]),
+                    TreeWalkWarning)
+                return
+            else:
+                raise
+
+        # Pre-order traversal: yield each matching subdirectory first,
+        # then recurse into it.  Note recursion descends into every
+        # subdirectory, not only pattern matches.
+        for child in dirs:
+            if pattern is None or child.fnmatch(pattern):
+                yield child
+            for subsubdir in child.walkdirs(pattern, errors):
+                yield subsubdir
+
+    def walkfiles(self, pattern=None, errors='strict'):
+        """ D.walkfiles() -> iterator over files in D, recursively.
+
+        The optional argument `pattern` limits the results to files
+        with names that match the pattern.  For example,
+        ``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp``
+        extension.
+        """
+        if errors not in ('strict', 'warn', 'ignore'):
+            raise ValueError("invalid errors parameter")
+
+        try:
+            childList = self.listdir()
+        except Exception:
+            if errors == 'ignore':
+                return
+            elif errors == 'warn':
+                warnings.warn(
+                    "Unable to list directory '%s': %s"
+                    % (self, sys.exc_info()[1]),
+                    TreeWalkWarning)
+                return
+            else:
+                raise
+
+        for child in childList:
+            try:
+                isfile = child.isfile()
+                isdir = not isfile and child.isdir()
+            except:
+                if errors == 'ignore':
+                    continue
+                elif errors == 'warn':
+                    warnings.warn(
+                        "Unable to access '%s': %s"
+                        % (self, sys.exc_info()[1]),
+                        TreeWalkWarning)
+                    continue
+                else:
+                    raise
+
+            if isfile:
+                if pattern is None or child.fnmatch(pattern):
+                    yield child
+            elif isdir:
+                for f in child.walkfiles(pattern, errors):
+                    yield f
+
+    def fnmatch(self, pattern, normcase=None):
+        """ Return ``True`` if `self.name` matches the given `pattern`.
+
+        `pattern` - A filename pattern with wildcards,
+            for example ``'*.py'``. If the pattern has a `normcase`
+            attribute, it is applied to the name and pattern prior
+            to comparison (unless `normcase` is given explicitly).
+
+        `normcase` - (optional) A function used to normalize the pattern and
+            filename before matching. Defaults to ``pattern.normcase`` if
+            present, otherwise to ``self.module.normcase`` (normally
+            :func:`os.path.normcase`).
+
+        .. seealso:: :func:`fnmatch.fnmatch`
+        """
+        # Prefer a normcase carried by the pattern object itself, then
+        # fall back to this path's flavour-specific normcase.
+        default_normcase = getattr(pattern, 'normcase', self.module.normcase)
+        normcase = normcase or default_normcase
+        name = normcase(self.name)
+        pattern = normcase(pattern)
+        return fnmatch.fnmatchcase(name, pattern)
+
+    def glob(self, pattern):
+        """ Return a list of Path objects that match the pattern.
+
+        `pattern` - a path relative to this directory, with wildcards.
+
+        For example, ``Path('/users').glob('*/bin/*')`` returns a list
+        of all the files users have in their :file:`bin` directories.
+
+        .. seealso:: :func:`glob.glob`
+        """
+        cls = self._next_class
+        return [cls(s) for s in glob.glob(self / pattern)]
+
+    #
+    # --- Reading or writing an entire file at once.
+
+    def open(self, *args, **kwargs):
+        """ Open this file and return a corresponding :class:`file` object.
+
+        Keyword arguments work as in :func:`io.open`.  If the file cannot be
+        opened, an :class:`~exceptions.OSError` is raised.
+        """
+        # io_error_compat presumably normalizes IOError/WindowsError to
+        # OSError on older Pythons -- defined earlier in this file; verify.
+        with io_error_compat():
+            return io.open(self, *args, **kwargs)
+
+    def bytes(self):
+        """ Open this file, read all bytes, return them as a string.
+
+        (On Python 3 the result is a :class:`bytes` object.)
+        """
+        with self.open('rb') as f:
+            return f.read()
+
+    def chunks(self, size, *args, **kwargs):
+        """ Returns a generator yielding chunks of the file, so it can
+            be read piece by piece with a simple for loop.
+
+           Any argument you pass after `size` will be passed to :meth:`open`.
+
+           :example:
+
+               >>> hash = hashlib.md5()
+               >>> for chunk in Path("path.py").chunks(8192, mode='rb'):
+               ...     hash.update(chunk)
+
+            This will read the file by chunks of 8192 bytes.
+        """
+        with self.open(*args, **kwargs) as f:
+            # ``f.read(size) or None`` maps the empty value returned at
+            # EOF onto the sentinel, which terminates the iterator.
+            for chunk in iter(lambda: f.read(size) or None, None):
+                yield chunk
+
+    def write_bytes(self, bytes, append=False):
+        """ Open this file and write the given bytes to it.
+
+        Default behavior is to overwrite any existing file.
+        Call ``p.write_bytes(bytes, append=True)`` to append instead.
+        """
+        if append:
+            mode = 'ab'
+        else:
+            mode = 'wb'
+        with self.open(mode) as f:
+            f.write(bytes)
+
+    def text(self, encoding=None, errors='strict'):
+        r""" Open this file, read it in, return the content as a string.
+
+        All newline sequences are converted to ``'\n'``.  Keyword arguments
+        will be passed to :meth:`open`.
+
+        .. seealso:: :meth:`lines`
+        """
+        with self.open(mode='r', encoding=encoding, errors=errors) as f:
+            # Normalize every newline flavour (including the Unicode
+            # ones matched by U_NEWLINE) to '\n'.
+            return U_NEWLINE.sub('\n', f.read())
+
+    def write_text(self, text, encoding=None, errors='strict',
+                   linesep=os.linesep, append=False):
+        r""" Write the given text to this file.
+
+        The default behavior is to overwrite any existing file;
+        to append instead, use the `append=True` keyword argument.
+
+        There are two differences between :meth:`write_text` and
+        :meth:`write_bytes`: newline handling and Unicode handling.
+        See below.
+
+        Parameters:
+
+          `text` - str/unicode - The text to be written.
+
+          `encoding` - str - The Unicode encoding that will be used.
+              This is ignored if `text` isn't a Unicode string.
+
+          `errors` - str - How to handle Unicode encoding errors.
+              Default is ``'strict'``.  See ``help(unicode.encode)`` for the
+              options.  This is ignored if `text` isn't a Unicode
+              string.
+
+          `linesep` - keyword argument - str/unicode - The sequence of
+              characters to be used to mark end-of-line.  The default is
+              :data:`os.linesep`.  You can also specify ``None`` to
+              leave all newlines as they are in `text`.
+
+          `append` - keyword argument - bool - Specifies what to do if
+              the file already exists (``True``: append to the end of it;
+              ``False``: overwrite it.)  The default is ``False``.
+
+
+        --- Newline handling.
+
+        ``write_text()`` converts all standard end-of-line sequences
+        (``'\n'``, ``'\r'``, and ``'\r\n'``) to your platform's default
+        end-of-line sequence (see :data:`os.linesep`; on Windows, for example,
+        the end-of-line marker is ``'\r\n'``).
+
+        If you don't like your platform's default, you can override it
+        using the `linesep=` keyword argument.  If you specifically want
+        ``write_text()`` to preserve the newlines as-is, use ``linesep=None``.
+
+        This applies to Unicode text the same as to 8-bit text, except
+        there are three additional standard Unicode end-of-line sequences:
+        ``u'\x85'``, ``u'\r\x85'``, and ``u'\u2028'``.
+
+        (This is slightly different from when you open a file for
+        writing with ``fopen(filename, "w")`` in C or ``open(filename, 'w')``
+        in Python.)
+
+
+        --- Unicode
+
+        If `text` isn't Unicode, then apart from newline handling, the
+        bytes are written verbatim to the file.  The `encoding` and
+        `errors` arguments are not used and must be omitted.
+
+        If `text` is Unicode, it is first converted to :func:`bytes` using the
+        specified `encoding` (or the default encoding if `encoding`
+        isn't specified).  The `errors` argument applies only to this
+        conversion.
+
+        """
+        if isinstance(text, text_type):
+            if linesep is not None:
+                text = U_NEWLINE.sub(linesep, text)
+            text = text.encode(encoding or sys.getdefaultencoding(), errors)
+        else:
+            assert encoding is None
+            text = NEWLINE.sub(linesep, text)
+        self.write_bytes(text, append=append)
+
+    def lines(self, encoding=None, errors='strict', retain=True):
+        r""" Open this file, read all lines, return them in a list.
+
+        Optional arguments:
+            `encoding` - The Unicode encoding (or character set) of
+                the file.  The default is ``None``, meaning the content
+                of the file is read as 8-bit characters and returned
+                as a list of (non-Unicode) str objects.
+            `errors` - How to handle Unicode errors; see help(str.decode)
+                for the options.  Default is ``'strict'``.
+            `retain` - If ``True``, retain newline characters; but all newline
+                character combinations (``'\r'``, ``'\n'``, ``'\r\n'``) are
+                translated to ``'\n'``.  If ``False``, newline characters are
+                stripped off.  Default is ``True``.
+
+        This uses ``'U'`` mode.
+
+        .. seealso:: :meth:`text`
+        """
+        if encoding is None and retain:
+            # NOTE(review): the 'U' mode flag is deprecated in Python 3
+            # and removed in 3.11 -- confirm the supported runtimes.
+            with self.open('U') as f:
+                return f.readlines()
+        else:
+            # splitlines(retain): keepends=True preserves the (already
+            # normalized) '\n' endings; False strips them.
+            return self.text(encoding, errors).splitlines(retain)
+
+    def write_lines(self, lines, encoding=None, errors='strict',
+                    linesep=os.linesep, append=False):
+        r""" Write the given lines of text to this file.
+
+        By default this overwrites any existing file at this path.
+
+        This puts a platform-specific newline sequence on every line.
+        See `linesep` below.
+
+            `lines` - A list of strings.
+
+            `encoding` - A Unicode encoding to use.  This applies only if
+                `lines` contains any Unicode strings.
+
+            `errors` - How to handle errors in Unicode encoding.  This
+                also applies only to Unicode strings.
+
+            linesep - The desired line-ending.  This line-ending is
+                applied to every line.  If a line already has any
+                standard line ending (``'\r'``, ``'\n'``, ``'\r\n'``,
+                ``u'\x85'``, ``u'\r\x85'``, ``u'\u2028'``), that will
+                be stripped off and this will be used instead.  The
+                default is os.linesep, which is platform-dependent
+                (``'\r\n'`` on Windows, ``'\n'`` on Unix, etc.).
+                Specify ``None`` to write the lines as-is, like
+                :meth:`file.writelines`.
+
+        Use the keyword argument ``append=True`` to append lines to the
+        file.  The default is to overwrite the file.
+
+        .. warning ::
+
+            When you use this with Unicode data, if the encoding of the
+            existing data in the file is different from the encoding
+            you specify with the `encoding=` parameter, the result is
+            mixed-encoding data, which can really confuse someone trying
+            to read the file later.
+        """
+        with self.open('ab' if append else 'wb') as f:
+            for l in lines:
+                isUnicode = isinstance(l, text_type)
+                if linesep is not None:
+                    # Strip any existing line ending, then apply `linesep`.
+                    pattern = U_NL_END if isUnicode else NL_END
+                    l = pattern.sub('', l) + linesep
+                if isUnicode:
+                    # Encode per-line; bytes lines are written verbatim.
+                    l = l.encode(encoding or sys.getdefaultencoding(), errors)
+                f.write(l)
+
+    def read_md5(self):
+        """ Calculate the md5 hash for this file.
+
+        This reads through the entire file.
+
+        .. seealso:: :meth:`read_hash`
+        """
+        # Convenience shorthand for read_hash('md5'); returns a digest
+        # (bytes), not a hex string.
+        return self.read_hash('md5')
+
+    def _hash(self, hash_name):
+        """ Returns a hash object for the file at the current path.
+
+            `hash_name` should be a hash algo name (such as ``'md5'`` or ``'sha1'``)
+            that's available in the :mod:`hashlib` module.
+        """
+        m = hashlib.new(hash_name)
+        for chunk in self.chunks(8192, mode="rb"):
+            m.update(chunk)
+        return m
+
+    def read_hash(self, hash_name):
+        """ Calculate given hash for this file.
+
+        Returns the raw digest (bytes).
+        List of supported hashes can be obtained from :mod:`hashlib` package.
+        This reads the entire file.
+
+        .. seealso:: :meth:`hashlib.hash.digest`
+        """
+        return self._hash(hash_name).digest()
+
+    def read_hexhash(self, hash_name):
+        """ Calculate given hash for this file, returning hexdigest.
+
+        Returns the digest as a hexadecimal string.
+        List of supported hashes can be obtained from :mod:`hashlib` package.
+        This reads the entire file.
+
+        .. seealso:: :meth:`hashlib.hash.hexdigest`
+        """
+        return self._hash(hash_name).hexdigest()
+
+    # --- Methods for querying the filesystem.
+    # N.B. On some platforms, the os.path functions may be implemented in C
+    # (e.g. isdir on Windows, Python 3.2.2), and compiled functions don't get
+    # bound. Playing it safe and wrapping them all in method calls.
+
+    def isabs(self):
+        """ .. seealso:: :func:`os.path.isabs` """
+        return self.module.isabs(self)
+
+    def exists(self):
+        """ .. seealso:: :func:`os.path.exists` """
+        return self.module.exists(self)
+
+    def isdir(self):
+        """ .. seealso:: :func:`os.path.isdir` """
+        return self.module.isdir(self)
+
+    def isfile(self):
+        """ .. seealso:: :func:`os.path.isfile` """
+        return self.module.isfile(self)
+
+    def islink(self):
+        """ .. seealso:: :func:`os.path.islink` """
+        return self.module.islink(self)
+
+    def ismount(self):
+        """ .. seealso:: :func:`os.path.ismount` """
+        return self.module.ismount(self)
+
+    def samefile(self, other):
+        """ .. seealso:: :func:`os.path.samefile` """
+        if not hasattr(self.module, 'samefile'):
+            # Fallback for platforms without os.path.samefile: compare
+            # fully canonicalized (real, normalized, case-folded) paths.
+            other = Path(other).realpath().normpath().normcase()
+            return self.realpath().normpath().normcase() == other
+        return self.module.samefile(self, other)
+
+    def getatime(self):
+        """ .. seealso:: :attr:`atime`, :func:`os.path.getatime` """
+        return self.module.getatime(self)
+
+    atime = property(
+        getatime, None, None,
+        """ Last access time of the file.
+
+        .. seealso:: :meth:`getatime`, :func:`os.path.getatime`
+        """)
+
+    def getmtime(self):
+        """ .. seealso:: :attr:`mtime`, :func:`os.path.getmtime` """
+        return self.module.getmtime(self)
+
+    mtime = property(
+        getmtime, None, None,
+        """ Last-modified time of the file.
+
+        .. seealso:: :meth:`getmtime`, :func:`os.path.getmtime`
+        """)
+
+    def getctime(self):
+        """ .. seealso:: :attr:`ctime`, :func:`os.path.getctime` """
+        return self.module.getctime(self)
+
+    # NB: os.path.getctime is creation time on Windows but metadata
+    # change time on Unix.
+    ctime = property(
+        getctime, None, None,
+        """ Creation time of the file.
+
+        .. seealso:: :meth:`getctime`, :func:`os.path.getctime`
+        """)
+
+    def getsize(self):
+        """ .. seealso:: :attr:`size`, :func:`os.path.getsize` """
+        return self.module.getsize(self)
+
+    size = property(
+        getsize, None, None,
+        """ Size of the file, in bytes.
+
+        .. seealso:: :meth:`getsize`, :func:`os.path.getsize`
+        """)
+
+    # Only defined where the platform provides os.access.
+    if hasattr(os, 'access'):
+        def access(self, mode):
+            """ Return ``True`` if current user has access to this path.
+
+            mode - One of the constants :data:`os.F_OK`, :data:`os.R_OK`,
+            :data:`os.W_OK`, :data:`os.X_OK`
+
+            .. seealso:: :func:`os.access`
+            """
+            return os.access(self, mode)
+
+    def stat(self):
+        """ Perform a ``stat()`` system call on this path.
+
+        Returns an :class:`os.stat_result`; follows symbolic links.
+
+        .. seealso:: :meth:`lstat`, :func:`os.stat`
+        """
+        return os.stat(self)
+
+    def lstat(self):
+        """ Like :meth:`stat`, but do not follow symbolic links.
+
+        .. seealso:: :meth:`stat`, :func:`os.lstat`
+        """
+        return os.lstat(self)
+
+    def __get_owner_windows(self):
+        """
+        Return the name of the owner of this file or directory. Follow
+        symbolic links.
+
+        Return a name of the form ``r'DOMAIN\\User Name'``; may be a group.
+
+        .. seealso:: :attr:`owner`
+        """
+        desc = win32security.GetFileSecurity(
+            self, win32security.OWNER_SECURITY_INFORMATION)
+        sid = desc.GetSecurityDescriptorOwner()
+        account, domain, typecode = win32security.LookupAccountSid(None, sid)
+        return domain + '\\' + account
+
+    def __get_owner_unix(self):
+        """
+        Return the name of the owner of this file or directory. Follow
+        symbolic links.
+
+        .. seealso:: :attr:`owner`
+        """
+        st = self.stat()
+        return pwd.getpwuid(st.st_uid).pw_name
+
+    def __get_owner_not_implemented(self):
+        raise NotImplementedError("Ownership not available on this platform.")
+
+    # Select the implementation at class-definition time based on which
+    # optional module (win32security or pwd) was importable earlier in
+    # this file.
+    if 'win32security' in globals():
+        get_owner = __get_owner_windows
+    elif 'pwd' in globals():
+        get_owner = __get_owner_unix
+    else:
+        get_owner = __get_owner_not_implemented
+
+    owner = property(
+        get_owner, None, None,
+        """ Name of the owner of this file or directory.
+
+        .. seealso:: :meth:`get_owner`""")
+
+    # POSIX-only wrappers; absent on platforms lacking the os functions.
+    if hasattr(os, 'statvfs'):
+        def statvfs(self):
+            """ Perform a ``statvfs()`` system call on this path.
+
+            .. seealso:: :func:`os.statvfs`
+            """
+            return os.statvfs(self)
+
+    if hasattr(os, 'pathconf'):
+        def pathconf(self, name):
+            """ .. seealso:: :func:`os.pathconf` """
+            return os.pathconf(self, name)
+
+    #
+    # --- Modifying operations on files and directories
+
+    def utime(self, times):
+        """ Set the access and modified times of this file.
+
+        `times` is passed straight through to :func:`os.utime`
+        (``None`` or an ``(atime, mtime)`` pair).  Returns self.
+
+        .. seealso:: :func:`os.utime`
+        """
+        os.utime(self, times)
+        return self
+
+    def chmod(self, mode):
+        """
+        Set the mode. May be the new mode (os.chmod behavior) or a `symbolic
+        mode <http://en.wikipedia.org/wiki/Chmod#Symbolic_modes>`_.
+
+        .. seealso:: :func:`os.chmod`
+        """
+        if isinstance(mode, string_types):
+            # _multi_permission_mask (defined elsewhere in this file)
+            # presumably parses symbolic modes like 'u+x' into a callable
+            # that transforms the current mode bits -- verify.
+            mask = _multi_permission_mask(mode)
+            mode = mask(self.stat().st_mode)
+        os.chmod(self, mode)
+        return self
+
+    def chown(self, uid=-1, gid=-1):
+        """
+        Change the owner and group by names rather than the uid or gid numbers.
+
+        (`uid`/`gid` may also be numeric ids; string names are resolved
+        via the :mod:`pwd`/:mod:`grp` modules where available.)
+
+        .. seealso:: :func:`os.chown`
+        """
+        if hasattr(os, 'chown'):
+            if 'pwd' in globals() and isinstance(uid, string_types):
+                uid = pwd.getpwnam(uid).pw_uid
+            if 'grp' in globals() and isinstance(gid, string_types):
+                gid = grp.getgrnam(gid).gr_gid
+            os.chown(self, uid, gid)
+        else:
+            raise NotImplementedError("Ownership not available on this platform.")
+        return self
+
+    def rename(self, new):
+        """ .. seealso:: :func:`os.rename` """
+        os.rename(self, new)
+        return self._next_class(new)
+
+    def renames(self, new):
+        """ .. seealso:: :func:`os.renames` """
+        os.renames(self, new)
+        return self._next_class(new)
+
+    #
+    # --- Create/delete operations on directories
+
+    def mkdir(self, mode=0o777):
+        """ .. seealso:: :func:`os.mkdir` """
+        os.mkdir(self, mode)
+        return self
+
+    def mkdir_p(self, mode=0o777):
+        """ Like :meth:`mkdir`, but does not raise an exception if the
+        directory already exists. """
+        try:
+            self.mkdir(mode)
+        except OSError:
+            # sys.exc_info() form keeps Python 2/3 source compatibility.
+            _, e, _ = sys.exc_info()
+            if e.errno != errno.EEXIST:
+                raise
+        return self
+
+    def makedirs(self, mode=0o777):
+        """ .. seealso:: :func:`os.makedirs` """
+        os.makedirs(self, mode)
+        return self
+
+    def makedirs_p(self, mode=0o777):
+        """ Like :meth:`makedirs`, but does not raise an exception if the
+        directory already exists. """
+        try:
+            self.makedirs(mode)
+        except OSError:
+            _, e, _ = sys.exc_info()
+            if e.errno != errno.EEXIST:
+                raise
+        return self
+
+    def rmdir(self):
+        """ Remove this (empty) directory; returns self.
+
+        .. seealso:: :func:`os.rmdir` """
+        os.rmdir(self)
+        return self
+
+    def rmdir_p(self):
+        """ Like :meth:`rmdir`, but does not raise an exception if the
+        directory is not empty or does not exist. """
+        try:
+            self.rmdir()
+        except OSError:
+            _, e, _ = sys.exc_info()
+            if e.errno != errno.ENOTEMPTY and e.errno != errno.EEXIST:
+                raise
+        return self
+
+    def removedirs(self):
+        """ Remove this directory and any empty parents; returns self.
+
+        .. seealso:: :func:`os.removedirs` """
+        os.removedirs(self)
+        return self
+
+    def removedirs_p(self):
+        """ Like :meth:`removedirs`, but does not raise an exception if the
+        directory is not empty or does not exist. """
+        try:
+            self.removedirs()
+        except OSError:
+            _, e, _ = sys.exc_info()
+            if e.errno != errno.ENOTEMPTY and e.errno != errno.EEXIST:
+                raise
+        return self
+
+    # --- Modifying operations on files
+
+    def touch(self):
+        """ Set the access/modified times of this file to the current time.
+        Create the file if it does not exist.
+        """
+        # O_CREAT creates the file if missing (mode 0o666 before umask);
+        # utime(None) then stamps both times with "now".
+        fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0o666)
+        os.close(fd)
+        os.utime(self, None)
+        return self
+
+    def remove(self):
+        """ .. seealso:: :func:`os.remove` """
+        os.remove(self)
+        return self
+
+    def remove_p(self):
+        """ Like :meth:`remove`, but does not raise an exception if the
+        file does not exist. """
+        try:
+            self.unlink()
+        except OSError:
+            _, e, _ = sys.exc_info()
+            if e.errno != errno.ENOENT:
+                raise
+        return self
+
+    def unlink(self):
+        """ .. seealso:: :func:`os.unlink` """
+        os.unlink(self)
+        return self
+
+    def unlink_p(self):
+        """ Like :meth:`unlink`, but does not raise an exception if the
+        file does not exist. """
+        # unlink and remove are aliases; delegate to remove_p.
+        self.remove_p()
+        return self
+
+    # --- Links
+    # Each wrapper is only defined where the platform provides the
+    # corresponding os function.
+
+    if hasattr(os, 'link'):
+        def link(self, newpath):
+            """ Create a hard link at `newpath`, pointing to this file.
+
+            Returns the new path wrapped in this path's companion class.
+
+            .. seealso:: :func:`os.link`
+            """
+            os.link(self, newpath)
+            return self._next_class(newpath)
+
+    if hasattr(os, 'symlink'):
+        def symlink(self, newlink):
+            """ Create a symbolic link at `newlink`, pointing here.
+
+            .. seealso:: :func:`os.symlink`
+            """
+            os.symlink(self, newlink)
+            return self._next_class(newlink)
+
+    if hasattr(os, 'readlink'):
+        def readlink(self):
+            """ Return the path to which this symbolic link points.
+
+            The result may be an absolute or a relative path.
+
+            .. seealso:: :meth:`readlinkabs`, :func:`os.readlink`
+            """
+            return self._next_class(os.readlink(self))
+
+        def readlinkabs(self):
+            """ Return the path to which this symbolic link points.
+
+            The result is always an absolute path.
+
+            .. seealso:: :meth:`readlink`, :func:`os.readlink`
+            """
+            p = self.readlink()
+            if p.isabs():
+                return p
+            else:
+                # Relative targets are resolved against this link's
+                # directory, then made absolute.
+                return (self.parent / p).abspath()
+
+    # High-level functions from shutil
+    # These functions will be bound to the instance such that
+    # Path(name).copy(target) will invoke shutil.copy(name, target)
+    # (the instance is passed as the first, *src*, argument).
+
+    copyfile = shutil.copyfile
+    copymode = shutil.copymode
+    copystat = shutil.copystat
+    copy = shutil.copy
+    copy2 = shutil.copy2
+    copytree = shutil.copytree
+    # shutil.move is absent on some very old Pythons; bind it only
+    # when available.
+    if hasattr(shutil, 'move'):
+        move = shutil.move
+    rmtree = shutil.rmtree
+
+    def rmtree_p(self):
+        """ Like :meth:`rmtree`, but does not raise an exception if the
+        directory does not exist. """
+        try:
+            self.rmtree()
+        except OSError:
+            _, e, _ = sys.exc_info()
+            # Only ENOENT ("does not exist") is suppressed; anything
+            # else (e.g. permission errors) still propagates.
+            if e.errno != errno.ENOENT:
+                raise
+        return self
+
+    def chdir(self):
+        """ Make this path the process's current working directory.
+
+        .. seealso:: :func:`os.chdir` """
+        os.chdir(self)
+
+    # Alias, for brevity.
+    cd = chdir
+
+    def merge_tree(self, dst, symlinks=False, *args, **kwargs):
+        """
+        Copy entire contents of self to dst, overwriting existing
+        contents in dst with those in self.
+
+        If the additional keyword `update` is True, each
+        `src` will only be copied if `dst` does not exist,
+        or `src` is newer than `dst`.
+
+        Note that the technique employed stages the files in a temporary
+        directory first, so this function is not suitable for merging
+        trees with large files, especially if the temporary directory
+        is not capable of storing a copy of the entire source tree.
+        """
+        # `update` is popped so the remaining kwargs can be forwarded
+        # to copytree untouched.
+        update = kwargs.pop('update', False)
+        with tempdir() as _temp_dir:
+            # first copy the tree to a stage directory to support
+            #  the parameters and behavior of copytree.
+            # (staged under a name derived from hash(self) to avoid
+            #  collisions within the temp dir)
+            stage = _temp_dir / str(hash(self))
+            self.copytree(stage, symlinks, *args, **kwargs)
+            # now copy everything from the stage directory using
+            #  the semantics of dir_util.copy_tree
+            dir_util.copy_tree(stage, dst, preserve_symlinks=symlinks,
+                update=update)
+
+    #
+    # --- Special stuff from os
+
+    # Unix-only.
+    if hasattr(os, 'chroot'):
+        def chroot(self):
+            """ .. seealso:: :func:`os.chroot` """
+            os.chroot(self)
+
+    # Windows-only.
+    if hasattr(os, 'startfile'):
+        def startfile(self):
+            """ .. seealso:: :func:`os.startfile` """
+            os.startfile(self)
+            return self
+
+    # in-place re-writing, courtesy of Martijn Pieters
+    # http://www.zopatista.com/python/2013/11/26/inplace-file-rewriting/
+    @contextlib.contextmanager
+    def in_place(self, mode='r', buffering=-1, encoding=None, errors=None,
+            newline=None, backup_extension=None):
+        """
+        A context in which a file may be re-written in-place with new content.
+
+        Yields a tuple of :samp:`({readable}, {writable})` file objects, where `writable`
+        replaces `readable`.
+
+        If an exception occurs, the old file is restored, removing the
+        written data.
+
+        Mode *must not* use ``'w'``, ``'a'``, or ``'+'``; only read-only-modes are
+        allowed. A :exc:`ValueError` is raised on invalid modes.
+
+        For example, to add line numbers to a file::
+
+            p = Path(filename)
+            assert p.isfile()
+            with p.in_place() as (reader, writer):
+                for number, line in enumerate(reader, 1):
+                    writer.write('{0:3}: '.format(number))
+                    writer.write(line)
+
+        Thereafter, the file at `filename` will have line numbers in it.
+        """
+        # (Shadows the module-level ``io``; redundant but harmless.)
+        import io
+
+        if set(mode).intersection('wa+'):
+            raise ValueError('Only read-only file modes can be used')
+
+        # move existing file to backup, create new file with same permissions
+        # borrowed extensively from the fileinput module
+        backup_fn = self + (backup_extension or os.extsep + 'bak')
+        try:
+            os.unlink(backup_fn)
+        except os.error:
+            pass
+        os.rename(self, backup_fn)
+        readable = io.open(backup_fn, mode, buffering=buffering,
+            encoding=encoding, errors=errors, newline=newline)
+        try:
+            # Try to clone the original file's permission bits.
+            perm = os.fstat(readable.fileno()).st_mode
+        except OSError:
+            # fstat unavailable/failed: fall back to default permissions.
+            writable = open(self, 'w' + mode.replace('r', ''),
+                buffering=buffering, encoding=encoding, errors=errors,
+                newline=newline)
+        else:
+            os_mode = os.O_CREAT | os.O_WRONLY | os.O_TRUNC
+            if hasattr(os, 'O_BINARY'):
+                os_mode |= os.O_BINARY
+            fd = os.open(self, os_mode, perm)
+            writable = io.open(fd, "w" + mode.replace('r', ''),
+                buffering=buffering, encoding=encoding, errors=errors,
+                newline=newline)
+            try:
+                if hasattr(os, 'chmod'):
+                    os.chmod(self, perm)
+            except OSError:
+                pass
+        try:
+            yield readable, writable
+        except Exception:
+            # move backup back
+            readable.close()
+            writable.close()
+            try:
+                os.unlink(self)
+            except os.error:
+                pass
+            os.rename(backup_fn, self)
+            raise
+        else:
+            readable.close()
+            writable.close()
+        finally:
+            # On success the backup was renamed back over ``self`` is NOT
+            # the case -- the backup is simply discarded here; on failure
+            # it was already renamed back, so unlink is a no-op.
+            try:
+                os.unlink(backup_fn)
+            except os.error:
+                pass
+
+    @ClassProperty
+    @classmethod
+    def special(cls):
+        """
+        Return a factory for a SpecialResolver object, suitable for
+        referencing a platform-appropriate directory for the given
+        type of content.
+
+        For example, to get a user config directory, invoke:
+
+            dir = Path.special().user.config
+
+        Uses the `appdirs
+        <https://pypi.python.org/pypi/appdirs/1.4.0>`_ package to resolve
+        the paths in a platform-friendly way.
+
+        To create a config directory for 'My App', consider:
+
+            dir = Path.special("My App").user.config.makedirs_p()
+
+        If the ``appdirs`` module is not installed, invocation
+        of special will raise an ImportError.
+        """
+        # Binds this Path class as the first SpecialResolver argument;
+        # the caller supplies the appdirs.AppDirs arguments.
+        return functools.partial(SpecialResolver, cls)
+
+
+class SpecialResolver(object):
+    """
+    Resolve platform-specific special directories through the third-party
+    ``appdirs`` package, returning them wrapped in a given path class.
+
+    Attribute access chains map onto appdirs properties:
+    ``resolver.<scope>.<class_>`` resolves ``'<scope>_<class_>_dir'``
+    (e.g. ``resolver.user.config`` -> ``user_config_dir``).
+    """
+    class ResolverScope:
+        # Intermediate object holding the first ('scope') component of the
+        # '<scope>_<class_>_dir' property name.
+        def __init__(self, paths, scope):
+            self.paths = paths
+            self.scope = scope
+
+        def __getattr__(self, class_):
+            # e.g. scope 'user', attribute 'config' -> get_dir('user', 'config')
+            return self.paths.get_dir(self.scope, class_)
+
+    def __init__(self, path_class, *args, **kwargs):
+        # Imported lazily so appdirs is only required when special() is used.
+        appdirs = importlib.import_module('appdirs')
+
+        # let appname default to None until
+        # https://github.com/ActiveState/appdirs/issues/55 is solved.
+        # (Short-circuit: the default is only applied when no positional
+        # args were given.)
+        not args and kwargs.setdefault('appname', None)
+
+        vars(self).update(
+            path_class=path_class,
+            wrapper=appdirs.AppDirs(*args, **kwargs),
+        )
+
+    def __getattr__(self, scope):
+        # First attribute access captures the scope ('user', 'site', ...).
+        return self.ResolverScope(self, scope)
+
+    def get_dir(self, scope, class_):
+        """
+        Return the callable function from appdirs, but with the
+        result wrapped in self.path_class
+        """
+        prop_name = '{scope}_{class_}_dir'.format(**locals())
+        value = getattr(self.wrapper, prop_name)
+        # The appdirs value may contain several pathsep-separated paths.
+        MultiPath = Multi.for_class(self.path_class)
+        return MultiPath.detect(value)
+
+
+class Multi:
+    """
+    A mix-in for a Path which may contain multiple Path separated by pathsep.
+    """
+    @classmethod
+    def for_class(cls, path_cls):
+        # Dynamically build e.g. 'MultiPath' subclassing (Multi, path_cls).
+        name = 'Multi' + path_cls.__name__
+        if PY2:
+            # type() requires a native (byte) string name on Python 2.
+            name = str(name)
+        return type(name, (cls, path_cls), {})
+
+    @classmethod
+    def detect(cls, input):
+        # Only use the Multi class when a pathsep is actually present;
+        # otherwise return a plain (non-Multi) path instance.
+        if os.pathsep not in input:
+            cls = cls._next_class
+        return cls(input)
+
+    def __iter__(self):
+        # Iterate over the individual paths contained in this value,
+        # each wrapped in the non-Multi path class.
+        return iter(map(self._next_class, self.split(os.pathsep)))
+
+    @ClassProperty
+    @classmethod
+    def _next_class(cls):
+        """
+        Multi-subclasses should use the parent class
+        """
+        # First class in the MRO that is not itself a Multi subclass.
+        return next(
+            class_
+            for class_ in cls.__mro__
+            if not issubclass(class_, Multi)
+        )
+
+
+class tempdir(Path):
+    """
+    A temporary directory (created via :func:`tempfile.mkdtemp`, accepting
+    the same parameters) that is also usable as a context manager.
+
+    Example:
+
+        with tempdir() as d:
+            # do stuff with the Path object "d"
+
+        # here the directory is deleted automatically
+
+    .. seealso:: :func:`tempfile.mkdtemp`
+    """
+
+    @ClassProperty
+    @classmethod
+    def _next_class(cls):
+        # Derived paths are plain Path objects, not tempdir instances.
+        return Path
+
+    def __new__(cls, *args, **kwargs):
+        # The directory is created eagerly, at construction time.
+        dirname = tempfile.mkdtemp(*args, **kwargs)
+        return super(tempdir, cls).__new__(cls, dirname)
+
+    def __init__(self, *args, **kwargs):
+        # The mkdtemp arguments were consumed in __new__; ignore them here.
+        pass
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        # Remove the tree only on clean exit; when the body raised, the
+        # directory is left in place (NOTE(review): presumably to aid
+        # debugging — confirm this is the intended contract).
+        if not exc_value:
+            self.rmtree()
+
+
+def _multi_permission_mask(mode):
+    """
+    Support multiple, comma-separated Unix chmod symbolic modes.
+
+    >>> _multi_permission_mask('a=r,u+w')(0) == 0o644
+    True
+    """
+    # compose(f, g) applies f first, then g, so reduce applies the
+    # comma-separated modes left-to-right.
+    compose = lambda f, g: lambda *args, **kwargs: g(f(*args, **kwargs))
+    return functools.reduce(compose, map(_permission_mask, mode.split(',')))
+
+
+def _permission_mask(mode):
+    """
+    Convert a Unix chmod symbolic mode like ``'ugo+rwx'`` to a function
+    suitable for applying to a mask to effect that change.
+
+    >>> mask = _permission_mask('ugo+rwx')
+    >>> mask(0o554) == 0o777
+    True
+
+    >>> _permission_mask('go-x')(0o777) == 0o766
+    True
+
+    >>> _permission_mask('o-x')(0o445) == 0o444
+    True
+
+    >>> _permission_mask('a+x')(0) == 0o111
+    True
+
+    >>> _permission_mask('a=rw')(0o057) == 0o666
+    True
+
+    >>> _permission_mask('u=x')(0o666) == 0o166
+    True
+
+    >>> _permission_mask('g=')(0o157) == 0o107
+    True
+    """
+    # parse the symbolic mode: who ∈ {u,g,o,a}+, op ∈ {-,+,=}, what ∈ {r,w,x}*
+    parsed = re.match('(?P<who>[ugoa]+)(?P<op>[-+=])(?P<what>[rwx]*)$', mode)
+    if not parsed:
+        raise ValueError("Unrecognized symbolic mode", mode)
+
+    # generate a mask representing the specified permission
+    spec_map = dict(r=4, w=2, x=1)
+    specs = (spec_map[perm] for perm in parsed.group('what'))
+    spec = functools.reduce(operator.or_, specs, 0)
+
+    # now apply spec to each subject in who
+    shift_map = dict(u=6, g=3, o=0)
+    # 'a' is shorthand for all three subject classes
+    who = parsed.group('who').replace('a', 'ugo')
+    masks = (spec << shift_map[subj] for subj in who)
+    mask = functools.reduce(operator.or_, masks)
+
+    op = parsed.group('op')
+
+    # if op is -, invert the mask
+    # (removal is then implemented below as AND with the complement)
+    if op == '-':
+        mask ^= 0o777
+
+    # if op is =, retain extant values for unreferenced subjects
+    if op == '=':
+        masks = (0o7 << shift_map[subj] for subj in who)
+        retain = functools.reduce(operator.or_, masks) ^ 0o777
+
+    op_map = {
+        '+': operator.or_,
+        '-': operator.and_,
+        '=': lambda mask, target: target & retain ^ mask,
+    }
+    return functools.partial(op_map[op], mask)
+
+
+class CaseInsensitivePattern(text_type):
+    """
+    A string with a ``'normcase'`` property, suitable for passing to
+    :meth:`listdir`, :meth:`dirs`, :meth:`files`, :meth:`walk`,
+    :meth:`walkdirs`, or :meth:`walkfiles` to match case-insensitive.
+
+    For example, to get all files ending in .py, .Py, .pY, or .PY in the
+    current directory::
+
+        from path import Path, CaseInsensitivePattern as ci
+        Path('.').files(ci('*.py'))
+    """
+
+    @property
+    def normcase(self):
+        # Deliberately use ntpath.normcase (which lower-cases) regardless
+        # of the host platform, so matching is case-insensitive everywhere.
+        return __import__('ntpath').normcase
+
+########################
+# Backward-compatibility
+class path(Path):
+    """Deprecated alias of :class:`Path`; warns on instantiation."""
+    def __new__(cls, *args, **kwargs):
+        msg = "path is deprecated. Use Path instead."
+        warnings.warn(msg, DeprecationWarning)
+        return Path.__new__(cls, *args, **kwargs)
+
+
+# Keep the deprecated name exported for backward compatibility.
+__all__ += ['path']
+########################
diff --git a/tasks/_vendor/pathlib.py b/tasks/_vendor/pathlib.py
new file mode 100644
index 0000000..9ab0e70
--- /dev/null
+++ b/tasks/_vendor/pathlib.py
@@ -0,0 +1,1280 @@
+import fnmatch
+import functools
+import io
+import ntpath
+import os
+import posixpath
+import re
+import sys
+import time
+from collections import Sequence
+from contextlib import contextmanager
+from errno import EINVAL, ENOENT
+from operator import attrgetter
+from stat import S_ISDIR, S_ISLNK, S_ISREG, S_ISSOCK, S_ISBLK, S_ISCHR, S_ISFIFO
+try:
+    from urllib import quote as urlquote, quote as urlquote_from_bytes
+except ImportError:
+    from urllib.parse import quote as urlquote, quote_from_bytes as urlquote_from_bytes
+
+
+try:
+    intern = intern
+except NameError:
+    intern = sys.intern
+try:
+    basestring = basestring
+except NameError:
+    basestring = str
+
+supports_symlinks = True
+try:
+    import nt
+except ImportError:
+    nt = None
+else:
+    if sys.getwindowsversion()[:2] >= (6, 0) and sys.version_info >= (3, 2):
+        from nt import _getfinalpathname
+    else:
+        supports_symlinks = False
+        _getfinalpathname = None
+
+
+__all__ = [
+    "PurePath", "PurePosixPath", "PureWindowsPath",
+    "Path", "PosixPath", "WindowsPath",
+    ]
+
+#
+# Internals
+#
+
+# True when running under Python 2 (enables the minimal-unicode code paths).
+_py2 = sys.version_info < (3,)
+# Encoding used to coerce unicode path parts to bytes on Python 2.
+_py2_fs_encoding = 'ascii'
+
+def _py2_fsencode(parts):
+    # py2 => minimal unicode support
+    # Encode unicode parts with the (ASCII) filesystem encoding; byte
+    # strings pass through untouched.
+    return [part.encode(_py2_fs_encoding) if isinstance(part, unicode)
+            else part for part in parts]
+
+def _is_wildcard_pattern(pat):
+    # Whether this pattern needs actual matching using fnmatch, or can
+    # be looked up directly as a file.
+    # ('*', '?' and '[' are the metacharacter introducers fnmatch knows.)
+    return "*" in pat or "?" in pat or "[" in pat
+
+
+class _Flavour(object):
+    """A flavour implements a particular (platform-specific) set of path
+    semantics."""
+
+    def __init__(self):
+        # Pre-bind the joiner for this flavour's separator.
+        self.join = self.sep.join
+
+    def parse_parts(self, parts):
+        """Canonicalize `parts` into a (drive, root, parsed-list) triple."""
+        if _py2:
+            parts = _py2_fsencode(parts)
+        parsed = []
+        sep = self.sep
+        altsep = self.altsep
+        drv = root = ''
+        # Walk the parts right-to-left; the first anchored part (one with
+        # a drive or root) terminates the scan.
+        it = reversed(parts)
+        for part in it:
+            if not part:
+                continue
+            if altsep:
+                part = part.replace(altsep, sep)
+            drv, root, rel = self.splitroot(part)
+            if sep in rel:
+                for x in reversed(rel.split(sep)):
+                    # Drop empty components and '.' entries; intern the
+                    # rest so equal components share storage.
+                    if x and x != '.':
+                        parsed.append(intern(x))
+            else:
+                if rel and rel != '.':
+                    parsed.append(intern(rel))
+            if drv or root:
+                if not drv:
+                    # If no drive is present, try to find one in the previous
+                    # parts. This makes the result of parsing e.g.
+                    # ("C:", "/", "a") reasonably intuitive.
+                    for part in it:
+                        drv = self.splitroot(part)[0]
+                        if drv:
+                            break
+                break
+        if drv or root:
+            # The anchor (drive + root) is stored as parts[0].
+            parsed.append(drv + root)
+        # Components were collected in reverse order.
+        parsed.reverse()
+        return drv, root, parsed
+
+    def join_parsed_parts(self, drv, root, parts, drv2, root2, parts2):
+        """
+        Join the two paths represented by the respective
+        (drive, root, parts) tuples.  Return a new (drive, root, parts) tuple.
+        """
+        if root2:
+            if not drv2 and drv:
+                # Rooted but drive-less second path: keep the first drive.
+                return drv, root2, [drv + root2] + parts2[1:]
+        elif drv2:
+            if drv2 == drv or self.casefold(drv2) == self.casefold(drv):
+                # Same drive => second path is relative to the first
+                return drv, root, parts + parts2[1:]
+        else:
+            # Second path is non-anchored (common case)
+            return drv, root, parts + parts2
+        # Second path is fully anchored: it replaces the first outright.
+        return drv2, root2, parts2
+
+
+class _WindowsFlavour(_Flavour):
+    # Reference for Windows paths can be found at
+    # http://msdn.microsoft.com/en-us/library/aa365247%28v=vs.85%29.aspx
+
+    sep = '\\'
+    altsep = '/'
+    has_drv = True
+    pathmod = ntpath
+
+    # Concrete (I/O-performing) use requires the nt module.
+    is_supported = (nt is not None)
+
+    drive_letters = (
+        set(chr(x) for x in range(ord('a'), ord('z') + 1)) |
+        set(chr(x) for x in range(ord('A'), ord('Z') + 1))
+    )
+    ext_namespace_prefix = '\\\\?\\'
+
+    reserved_names = (
+        set(['CON', 'PRN', 'AUX', 'NUL']) |
+        set(['COM%d' % i for i in range(1, 10)]) |
+        set(['LPT%d' % i for i in range(1, 10)])
+        )
+
+    # Interesting findings about extended paths:
+    # - '\\?\c:\a', '//?/c:\a' and '//?/c:/a' are all supported
+    #   but '\\?\c:/a' is not
+    # - extended paths are always absolute; "relative" extended paths will
+    #   fail.
+
+    def splitroot(self, part, sep=sep):
+        """Split `part` into (drive, root, relative-rest)."""
+        first = part[0:1]
+        second = part[1:2]
+        if (second == sep and first == sep):
+            # Leading double separator: strip any extended-path prefix
+            # before classifying.
+            # XXX extended paths should also disable the collapsing of "."
+            # components (according to MSDN docs).
+            prefix, part = self._split_extended_path(part)
+            first = part[0:1]
+            second = part[1:2]
+        else:
+            prefix = ''
+        third = part[2:3]
+        if (second == sep and first == sep and third != sep):
+            # is a UNC path:
+            # vvvvvvvvvvvvvvvvvvvvv root
+            # \\machine\mountpoint\directory\etc\...
+            #            directory ^^^^^^^^^^^^^^
+            index = part.find(sep, 2)
+            if index != -1:
+                index2 = part.find(sep, index + 1)
+                # a UNC path can't have two slashes in a row
+                # (after the initial two)
+                if index2 != index + 1:
+                    if index2 == -1:
+                        index2 = len(part)
+                    if prefix:
+                        return prefix + part[1:index2], sep, part[index2+1:]
+                    else:
+                        return part[:index2], sep, part[index2+1:]
+        drv = root = ''
+        # Classic 'X:' drive prefix.
+        if second == ':' and first in self.drive_letters:
+            drv = part[:2]
+            part = part[2:]
+            first = third
+        if first == sep:
+            root = first
+            part = part.lstrip(sep)
+        return prefix + drv, root, part
+
+    def casefold(self, s):
+        # Windows paths compare case-insensitively.
+        return s.lower()
+
+    def casefold_parts(self, parts):
+        return [p.lower() for p in parts]
+
+    def resolve(self, path):
+        s = str(path)
+        if not s:
+            # An empty path resolves to the current directory.
+            return os.getcwd()
+        if _getfinalpathname is not None:
+            return self._ext_to_normal(_getfinalpathname(s))
+        # Means fallback on absolute
+        return None
+
+    def _split_extended_path(self, s, ext_prefix=ext_namespace_prefix):
+        """Split off a leading '\\\\?\\' (and optional 'UNC') prefix."""
+        prefix = ''
+        if s.startswith(ext_prefix):
+            prefix = s[:4]
+            s = s[4:]
+            if s.startswith('UNC\\'):
+                prefix += s[:3]
+                s = '\\' + s[3:]
+        return prefix, s
+
+    def _ext_to_normal(self, s):
+        # Turn back an extended path into a normal DOS-like path
+        return self._split_extended_path(s)[1]
+
+    def is_reserved(self, parts):
+        # NOTE: the rules for reserved names seem somewhat complicated
+        # (e.g. r"..\NUL" is reserved but not r"foo\NUL").
+        # We err on the side of caution and return True for paths which are
+        # not considered reserved by Windows.
+        if not parts:
+            return False
+        if parts[0].startswith('\\\\'):
+            # UNC paths are never reserved
+            return False
+        # Compare the final component's stem, uppercased, e.g. 'nul.txt'.
+        return parts[-1].partition('.')[0].upper() in self.reserved_names
+
+    def make_uri(self, path):
+        # Under Windows, file URIs use the UTF-8 encoding.
+        drive = path.drive
+        if len(drive) == 2 and drive[1] == ':':
+            # It's a path on a local drive => 'file:///c:/a/b'
+            rest = path.as_posix()[2:].lstrip('/')
+            return 'file:///%s/%s' % (
+                drive, urlquote_from_bytes(rest.encode('utf-8')))
+        else:
+            # It's a path on a network drive => 'file://host/share/a/b'
+            return 'file:' + urlquote_from_bytes(path.as_posix().encode('utf-8'))
+
+
+class _PosixFlavour(_Flavour):
+    sep = '/'
+    altsep = ''
+    has_drv = False
+    pathmod = posixpath
+
+    # Posix semantics apply everywhere except native Windows.
+    is_supported = (os.name != 'nt')
+
+    def splitroot(self, part, sep=sep):
+        """Split `part` into ('', root, relative-rest); POSIX has no drives."""
+        if part and part[0] == sep:
+            stripped_part = part.lstrip(sep)
+            # According to POSIX path resolution:
+            # http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap04.html#tag_04_11
+            # "A pathname that begins with two successive slashes may be
+            # interpreted in an implementation-defined manner, although more
+            # than two leading slashes shall be treated as a single slash".
+            if len(part) - len(stripped_part) == 2:
+                return '', sep * 2, stripped_part
+            else:
+                return '', sep, stripped_part
+        else:
+            return '', '', part
+
+    def casefold(self, s):
+        # POSIX paths are case-sensitive, so casefolding is the identity.
+        return s
+
+    def casefold_parts(self, parts):
+        return parts
+
+    def resolve(self, path):
+        """Resolve symlinks and '.'/'..' components to an absolute path."""
+        sep = self.sep
+        accessor = path._accessor
+        # Maps each visited symlink to its resolved target, or to None
+        # while it is still being resolved (used for loop detection).
+        seen = {}
+        def _resolve(path, rest):
+            if rest.startswith(sep):
+                path = ''
+
+            for name in rest.split(sep):
+                if not name or name == '.':
+                    # current dir
+                    continue
+                if name == '..':
+                    # parent dir
+                    path, _, _ = path.rpartition(sep)
+                    continue
+                newpath = path + sep + name
+                if newpath in seen:
+                    # Already seen this path
+                    path = seen[newpath]
+                    if path is not None:
+                        # use cached value
+                        continue
+                    # The symlink is not resolved, so we must have a symlink loop.
+                    raise RuntimeError("Symlink loop from %r" % newpath)
+                # Resolve the symbolic link
+                try:
+                    target = accessor.readlink(newpath)
+                except OSError as e:
+                    if e.errno != EINVAL:
+                        raise
+                    # Not a symlink
+                    path = newpath
+                else:
+                    seen[newpath] = None # not resolved symlink
+                    path = _resolve(path, target)
+                    seen[newpath] = path # resolved symlink
+
+            return path
+        # NOTE: according to POSIX, getcwd() cannot contain path components
+        # which are symlinks.
+        base = '' if path.is_absolute() else os.getcwd()
+        # A fully-collapsed empty result means the root directory.
+        return _resolve(base, str(path)) or sep
+
+    def is_reserved(self, parts):
+        # POSIX has no reserved file names.
+        return False
+
+    def make_uri(self, path):
+        # We represent the path using the local filesystem encoding,
+        # for portability to other applications.
+        bpath = bytes(path)
+        return 'file://' + urlquote_from_bytes(bpath)
+
+
+# Singleton flavour instances shared by all path objects.
+_windows_flavour = _WindowsFlavour()
+_posix_flavour = _PosixFlavour()
+
+
+class _Accessor:
+    """An accessor implements a particular (system-specific or not) way of
+    accessing paths on the filesystem."""
+    # Abstract base: concrete accessors supply stat/open/listdir/etc.
+    # (see _NormalAccessor below).
+
+
+class _NormalAccessor(_Accessor):
+    # Concrete accessor performing real filesystem access via the os module.
+
+    def _wrap_strfunc(strfunc):
+        # Adapt an os-level function taking a str path so it accepts a
+        # path object (converted via str()); stored as a staticmethod.
+        @functools.wraps(strfunc)
+        def wrapped(pathobj, *args):
+            return strfunc(str(pathobj), *args)
+        return staticmethod(wrapped)
+
+    def _wrap_binary_strfunc(strfunc):
+        # Same, for os functions taking two path arguments (rename etc.).
+        @functools.wraps(strfunc)
+        def wrapped(pathobjA, pathobjB, *args):
+            return strfunc(str(pathobjA), str(pathobjB), *args)
+        return staticmethod(wrapped)
+
+    stat = _wrap_strfunc(os.stat)
+
+    lstat = _wrap_strfunc(os.lstat)
+
+    open = _wrap_strfunc(os.open)
+
+    listdir = _wrap_strfunc(os.listdir)
+
+    chmod = _wrap_strfunc(os.chmod)
+
+    if hasattr(os, "lchmod"):
+        lchmod = _wrap_strfunc(os.lchmod)
+    else:
+        def lchmod(self, pathobj, mode):
+            raise NotImplementedError("lchmod() not available on this system")
+
+    mkdir = _wrap_strfunc(os.mkdir)
+
+    unlink = _wrap_strfunc(os.unlink)
+
+    rmdir = _wrap_strfunc(os.rmdir)
+
+    rename = _wrap_binary_strfunc(os.rename)
+
+    # os.replace only exists on Python 3.3+.
+    if sys.version_info >= (3, 3):
+        replace = _wrap_binary_strfunc(os.replace)
+
+    if nt:
+        if supports_symlinks:
+            symlink = _wrap_binary_strfunc(os.symlink)
+        else:
+            def symlink(a, b, target_is_directory):
+                raise NotImplementedError("symlink() not available on this system")
+    else:
+        # Under POSIX, os.symlink() takes two args
+        @staticmethod
+        def symlink(a, b, target_is_directory):
+            return os.symlink(str(a), str(b))
+
+    utime = _wrap_strfunc(os.utime)
+
+    # Helper for resolve()
+    def readlink(self, path):
+        return os.readlink(path)
+
+
+# Module-level singleton used by concrete paths for filesystem access.
+_normal_accessor = _NormalAccessor()
+
+
+#
+# Globbing helpers
+#
+
+@contextmanager
+def _cached(func):
+    """
+    Yield a memoized wrapper of `func` (results keyed on the positional
+    arguments); the cache is discarded when the context exits.  If `func`
+    is already such a wrapper (marked with `__cached__`), yield it as-is.
+    """
+    try:
+        func.__cached__
+        yield func
+    except AttributeError:
+        # NOTE(review): an AttributeError raised from the with-block body
+        # would also be caught here, since the yield sits inside the try —
+        # confirm this is intended.
+        cache = {}
+        def wrapper(*args):
+            try:
+                return cache[args]
+            except KeyError:
+                value = cache[args] = func(*args)
+                return value
+        wrapper.__cached__ = True
+        try:
+            yield wrapper
+        finally:
+            cache.clear()
+
+def _make_selector(pattern_parts):
+    """Build a chained selector for the given glob pattern components."""
+    pat = pattern_parts[0]
+    child_parts = pattern_parts[1:]
+    if pat == '**':
+        cls = _RecursiveWildcardSelector
+    elif '**' in pat:
+        raise ValueError("Invalid pattern: '**' can only be an entire path component")
+    elif _is_wildcard_pattern(pat):
+        cls = _WildcardSelector
+    else:
+        cls = _PreciseSelector
+    # The selector recursively builds successors for child_parts.
+    return cls(pat, child_parts)
+
+# Memoize selector construction where lru_cache exists (Python 3.2+).
+if hasattr(functools, "lru_cache"):
+    _make_selector = functools.lru_cache()(_make_selector)
+
+
+class _Selector:
+    """A selector matches a specific glob pattern part against the children
+    of a given path."""
+
+    def __init__(self, child_parts):
+        self.child_parts = child_parts
+        if child_parts:
+            # Chain a selector for the remaining pattern components.
+            self.successor = _make_selector(child_parts)
+        else:
+            # End of the pattern: terminate the chain.
+            self.successor = _TerminatingSelector()
+
+    def select_from(self, parent_path):
+        """Iterate over all child paths of `parent_path` matched by this
+        selector.  This can contain parent_path itself."""
+        # Bind the hot-path callables once before the recursive walk.
+        path_cls = type(parent_path)
+        is_dir = path_cls.is_dir
+        exists = path_cls.exists
+        listdir = parent_path._accessor.listdir
+        return self._select_from(parent_path, is_dir, exists, listdir)
+
+
+class _TerminatingSelector:
+    # End of a selector chain: the candidate path itself is the match.
+
+    def _select_from(self, parent_path, is_dir, exists, listdir):
+        yield parent_path
+
+
+class _PreciseSelector(_Selector):
+    # Matches one literal (non-wildcard) component by direct lookup,
+    # avoiding a directory listing.
+
+    def __init__(self, name, child_parts):
+        self.name = name
+        _Selector.__init__(self, child_parts)
+
+    def _select_from(self, parent_path, is_dir, exists, listdir):
+        if not is_dir(parent_path):
+            return
+        path = parent_path._make_child_relpath(self.name)
+        if exists(path):
+            for p in self.successor._select_from(path, is_dir, exists, listdir):
+                yield p
+
+
+class _WildcardSelector(_Selector):
+    # Matches one wildcard component against each directory entry.
+
+    def __init__(self, pat, child_parts):
+        # Translate the shell-style pattern into a regular expression.
+        self.pat = re.compile(fnmatch.translate(pat))
+        _Selector.__init__(self, child_parts)
+
+    def _select_from(self, parent_path, is_dir, exists, listdir):
+        if not is_dir(parent_path):
+            return
+        cf = parent_path._flavour.casefold
+        # NOTE(review): entry names are casefolded but the compiled pattern
+        # is not — on a case-insensitive flavour an upper-case pattern may
+        # fail to match; confirm against upstream pathlib behavior.
+        for name in listdir(parent_path):
+            casefolded = cf(name)
+            if self.pat.match(casefolded):
+                path = parent_path._make_child_relpath(name)
+                for p in self.successor._select_from(path, is_dir, exists, listdir):
+                    yield p
+
+
+class _RecursiveWildcardSelector(_Selector):
+    # Implements '**': matches this directory and every directory below it.
+
+    def __init__(self, pat, child_parts):
+        # The pattern component is always '**'; only the children matter.
+        _Selector.__init__(self, child_parts)
+
+    def _iterate_directories(self, parent_path, is_dir, listdir):
+        # Pre-order walk over parent_path and all of its subdirectories.
+        yield parent_path
+        for name in listdir(parent_path):
+            path = parent_path._make_child_relpath(name)
+            if is_dir(path):
+                for p in self._iterate_directories(path, is_dir, listdir):
+                    yield p
+
+    def _select_from(self, parent_path, is_dir, exists, listdir):
+        if not is_dir(parent_path):
+            return
+        # Cache listdir results: both the walk and the successor list dirs.
+        with _cached(listdir) as listdir:
+            yielded = set()
+            try:
+                successor_select = self.successor._select_from
+                for starting_point in self._iterate_directories(parent_path, is_dir, listdir):
+                    for p in successor_select(starting_point, is_dir, exists, listdir):
+                        # De-duplicate: multiple starting points can reach
+                        # the same path.
+                        if p not in yielded:
+                            yield p
+                            yielded.add(p)
+            finally:
+                yielded.clear()
+
+
+#
+# Public API
+#
+
+class _PathParents(Sequence):
+    """This object provides sequence-like access to the logical ancestors
+    of a path.  Don't try to construct it yourself."""
+    __slots__ = ('_pathcls', '_drv', '_root', '_parts')
+
+    def __init__(self, path):
+        # We don't store the instance to avoid reference cycles
+        self._pathcls = type(path)
+        self._drv = path._drv
+        self._root = path._root
+        self._parts = path._parts
+
+    def __len__(self):
+        # Anchored paths keep the anchor in parts[0]; it is not a parent.
+        if self._drv or self._root:
+            return len(self._parts) - 1
+        else:
+            return len(self._parts)
+
+    def __getitem__(self, idx):
+        # parents[0] is the immediate parent; negative indices are rejected.
+        if idx < 0 or idx >= len(self):
+            raise IndexError(idx)
+        return self._pathcls._from_parsed_parts(self._drv, self._root,
+                                                self._parts[:-idx - 1])
+
+    def __repr__(self):
+        return "<{0}.parents>".format(self._pathcls.__name__)
+
+
+class PurePath(object):
+    """PurePath represents a filesystem path and offers operations which
+    don't imply any actual filesystem I/O.  Depending on your system,
+    instantiating a PurePath will return either a PurePosixPath or a
+    PureWindowsPath object.  You can also instantiate either of these classes
+    directly, regardless of your system.
+    """
+    __slots__ = (
+        '_drv', '_root', '_parts',
+        '_str', '_hash', '_pparts', '_cached_cparts',
+    )
+
+    def __new__(cls, *args):
+        """Construct a PurePath from one or several strings and or existing
+        PurePath objects.  The strings and path objects are combined so as
+        to yield a canonicalized path, which is incorporated into the
+        new PurePath object.
+        """
+        if cls is PurePath:
+            # Pick the concrete flavour class for the running OS.
+            cls = PureWindowsPath if os.name == 'nt' else PurePosixPath
+        return cls._from_parts(args)
+
+    def __reduce__(self):
+        """Pickle support: reconstruct by passing the parts to the class."""
+        # Using the parts tuple helps share interned path parts
+        # when pickling related paths.
+        return (self.__class__, tuple(self._parts))
+
+    @classmethod
+    def _parse_args(cls, args):
+        # This is useful when you don't want to create an instance, just
+        # canonicalize some constructor arguments.  Raises TypeError for
+        # anything that is neither a string nor a PurePath.
+        parts = []
+        for a in args:
+            if isinstance(a, PurePath):
+                # Reuse the already-parsed parts of another path.
+                parts += a._parts
+            elif isinstance(a, basestring):
+                parts.append(a)
+            else:
+                raise TypeError(
+                    "argument should be a path or str object, not %r"
+                    % type(a))
+        return cls._flavour.parse_parts(parts)
+
+    @classmethod
+    def _from_parts(cls, args, init=True):
+        # We need to call _parse_args on the instance, so as to get the
+        # right flavour.
+        self = object.__new__(cls)
+        drv, root, parts = self._parse_args(args)
+        self._drv = drv
+        self._root = root
+        self._parts = parts
+        if init:
+            # init=False lets callers defer the concrete-class _init().
+            self._init()
+        return self
+
+    @classmethod
+    def _from_parsed_parts(cls, drv, root, parts, init=True):
+        # Fast-path constructor from already-parsed (drv, root, parts).
+        self = object.__new__(cls)
+        self._drv = drv
+        self._root = root
+        self._parts = parts
+        if init:
+            self._init()
+        return self
+
+    @classmethod
+    def _format_parsed_parts(cls, drv, root, parts):
+        # For anchored paths, parts[0] is the anchor itself, so skip it.
+        if drv or root:
+            return drv + root + cls._flavour.join(parts[1:])
+        else:
+            return cls._flavour.join(parts)
+
+    def _init(self):
+        # Overridden in concrete Path
+        pass
+
+    def _make_child(self, args):
+        # Parse `args` and join the result onto this path.
+        drv, root, parts = self._parse_args(args)
+        drv, root, parts = self._flavour.join_parsed_parts(
+            self._drv, self._root, self._parts, drv, root, parts)
+        return self._from_parsed_parts(drv, root, parts)
+
+    def __str__(self):
+        """Return the string representation of the path, suitable for
+        passing to system calls."""
+        try:
+            return self._str
+        except AttributeError:
+            # Computed lazily and cached; an empty path renders as '.'.
+            self._str = self._format_parsed_parts(self._drv, self._root,
+                                                  self._parts) or '.'
+            return self._str
+
+    def as_posix(self):
+        """Return the string representation of the path with forward (/)
+        slashes."""
+        # Only the flavour's separator is translated.
+        f = self._flavour
+        return str(self).replace(f.sep, '/')
+
+    def __bytes__(self):
+        """Return the bytes representation of the path.  This is only
+        recommended to use under Unix."""
+        # os.fsencode was added in Python 3.2.
+        if sys.version_info < (3, 2):
+            raise NotImplementedError("needs Python 3.2 or later")
+        return os.fsencode(str(self))
+
+    def __repr__(self):
+        # The repr shows the forward-slash (as_posix) form of the path.
+        return "{0}({1!r})".format(self.__class__.__name__, self.as_posix())
+
+    def as_uri(self):
+        """Return the path as a 'file' URI.
+
+        Raises ValueError for relative paths, which have no URI form.
+        """
+        if not self.is_absolute():
+            raise ValueError("relative path can't be expressed as a file URI")
+        return self._flavour.make_uri(self)
+
+    @property
+    def _cparts(self):
+        # Cached casefolded parts, for hashing and comparison
+        # (computed lazily on first use).
+        try:
+            return self._cached_cparts
+        except AttributeError:
+            self._cached_cparts = self._flavour.casefold_parts(self._parts)
+            return self._cached_cparts
+
+    def __eq__(self, other):
+        # Paths compare by casefolded parts; paths of different flavours
+        # are never equal.
+        if not isinstance(other, PurePath):
+            return NotImplemented
+        return self._cparts == other._cparts and self._flavour is other._flavour
+
+    def __ne__(self, other):
+        # Defined explicitly in terms of __eq__ (Python 2 does not derive
+        # != from ==).
+        return not self == other
+
+    def __hash__(self):
+        # Hash of the casefolded parts, computed lazily and cached so it
+        # stays consistent with __eq__.
+        try:
+            return self._hash
+        except AttributeError:
+            self._hash = hash(tuple(self._cparts))
+            return self._hash
+
+    def __lt__(self, other):
+        # Ordering is only defined between paths of the same flavour.
+        if not isinstance(other, PurePath) or self._flavour is not other._flavour:
+            return NotImplemented
+        return self._cparts < other._cparts
+
+    def __le__(self, other):
+        # Ordering is only defined between paths of the same flavour.
+        if not isinstance(other, PurePath) or self._flavour is not other._flavour:
+            return NotImplemented
+        return self._cparts <= other._cparts
+
+    def __gt__(self, other):
+        # Ordering is only defined between paths of the same flavour.
+        if not isinstance(other, PurePath) or self._flavour is not other._flavour:
+            return NotImplemented
+        return self._cparts > other._cparts
+
+    def __ge__(self, other):
+        # Ordering is only defined between paths of the same flavour.
+        if not isinstance(other, PurePath) or self._flavour is not other._flavour:
+            return NotImplemented
+        return self._cparts >= other._cparts
+
+    # Drive and root are stored separately; either may be ''.
+    drive = property(attrgetter('_drv'),
+                     doc="""The drive prefix (letter or UNC path), if any.""")
+
+    root = property(attrgetter('_root'),
+                    doc="""The root of the path, if any.""")
+
+    @property
+    def anchor(self):
+        """The concatenation of the drive and root, or ''."""
+        # Either component may be the empty string.
+        anchor = self._drv + self._root
+        return anchor
+
+    @property
+    def name(self):
+        """The final path component, if any."""
+        parts = self._parts
+        # For anchored paths parts[0] is the anchor itself, so one entry
+        # means there is no final component.
+        if len(parts) == (1 if (self._drv or self._root) else 0):
+            return ''
+        return parts[-1]
+
+    @property
+    def suffix(self):
+        """The final component's last suffix, if any."""
+        name = self.name
+        i = name.rfind('.')
+        # '0 < i' excludes a leading dot ('.profile' has no suffix);
+        # 'i < len(name) - 1' excludes a trailing dot.
+        if 0 < i < len(name) - 1:
+            return name[i:]
+        else:
+            return ''
+
+    @property
+    def suffixes(self):
+        """A list of the final component's suffixes, if any."""
+        name = self.name
+        if name.endswith('.'):
+            # A trailing dot yields no suffixes.
+            return []
+        # Ignore leading dots so dotfiles don't contribute empty suffixes.
+        name = name.lstrip('.')
+        return ['.' + suffix for suffix in name.split('.')[1:]]
+
+    @property
+    def stem(self):
+        """The final path component, minus its last suffix."""
+        name = self.name
+        i = name.rfind('.')
+        # Same boundary rules as `suffix`: a leading or trailing dot does
+        # not count as a suffix separator.
+        if 0 < i < len(name) - 1:
+            return name[:i]
+        else:
+            return name
+
+    def with_name(self, name):
+        """Return a new path with the file name changed.
+
+        Raises ValueError if this path has no name (e.g. a bare anchor).
+        """
+        if not self.name:
+            raise ValueError("%r has an empty name" % (self,))
+        return self._from_parsed_parts(self._drv, self._root,
+                                       self._parts[:-1] + [name])
+
+    def with_suffix(self, suffix):
+        """Return a new path with the file suffix changed (or added, if none)."""
+        # XXX if suffix is None, should the current suffix be removed?
+        drv, root, parts = self._flavour.parse_parts((suffix,))
+        if drv or root or len(parts) != 1:
+            raise ValueError("Invalid suffix %r" % (suffix))
+        suffix = parts[0]
+        if not suffix.startswith('.'):
+            raise ValueError("Invalid suffix %r" % (suffix))
+        name = self.name
+        if not name:
+            raise ValueError("%r has an empty name" % (self,))
+        old_suffix = self.suffix
+        if not old_suffix:
+            name = name + suffix
+        else:
+            name = name[:-len(old_suffix)] + suffix
+        return self._from_parsed_parts(self._drv, self._root,
+                                       self._parts[:-1] + [name])
+
+    def relative_to(self, *other):
+        """Return the relative path to another path identified by the passed
+        arguments.  If the operation is not possible (because this is not
+        a subpath of the other path), raise ValueError.
+        """
+        # For the purpose of this method, drive and root are considered
+        # separate parts, i.e.:
+        #   Path('c:/').relative_to('c:')  gives Path('/')
+        #   Path('c:/').relative_to('/')   raise ValueError
+        if not other:
+            raise TypeError("need at least one argument")
+        parts = self._parts
+        drv = self._drv
+        root = self._root
+        if root:
+            abs_parts = [drv, root] + parts[1:]
+        else:
+            abs_parts = parts
+        to_drv, to_root, to_parts = self._parse_args(other)
+        if to_root:
+            to_abs_parts = [to_drv, to_root] + to_parts[1:]
+        else:
+            to_abs_parts = to_parts
+        n = len(to_abs_parts)
+        cf = self._flavour.casefold_parts
+        if (root or drv) if n == 0 else cf(abs_parts[:n]) != cf(to_abs_parts):
+            formatted = self._format_parsed_parts(to_drv, to_root, to_parts)
+            raise ValueError("{!r} does not start with {!r}"
+                             .format(str(self), str(formatted)))
+        return self._from_parsed_parts('', root if n == 1 else '',
+                                       abs_parts[n:])
+
+    @property
+    def parts(self):
+        """An object providing sequence-like access to the
+        components in the filesystem path."""
+        # We cache the tuple to avoid building a new one each time .parts
+        # is accessed.  XXX is this necessary?
+        try:
+            return self._pparts
+        except AttributeError:
+            self._pparts = tuple(self._parts)
+            return self._pparts
+
+    def joinpath(self, *args):
+        """Combine this path with one or several arguments, and return a
+        new path representing either a subpath (if all arguments are relative
+        paths) or a totally different path (if one of the arguments is
+        anchored).
+        """
+        return self._make_child(args)
+
+    def __truediv__(self, key):
+        return self._make_child((key,))
+
+    def __rtruediv__(self, key):
+        return self._from_parts([key] + self._parts)
+
+    if sys.version_info < (3,):
+        __div__ = __truediv__
+        __rdiv__ = __rtruediv__
+
+    @property
+    def parent(self):
+        """The logical parent of the path."""
+        drv = self._drv
+        root = self._root
+        parts = self._parts
+        if len(parts) == 1 and (drv or root):
+            return self
+        return self._from_parsed_parts(drv, root, parts[:-1])
+
+    @property
+    def parents(self):
+        """A sequence of this path's logical parents."""
+        return _PathParents(self)
+
+    def is_absolute(self):
+        """True if the path is absolute (has both a root and, if applicable,
+        a drive)."""
+        if not self._root:
+            return False
+        return not self._flavour.has_drv or bool(self._drv)
+
+    def is_reserved(self):
+        """Return True if the path contains one of the special names reserved
+        by the system, if any."""
+        return self._flavour.is_reserved(self._parts)
+
+    def match(self, path_pattern):
+        """
+        Return True if this path matches the given pattern.
+        """
+        cf = self._flavour.casefold
+        path_pattern = cf(path_pattern)
+        drv, root, pat_parts = self._flavour.parse_parts((path_pattern,))
+        if not pat_parts:
+            raise ValueError("empty pattern")
+        if drv and drv != cf(self._drv):
+            return False
+        if root and root != cf(self._root):
+            return False
+        parts = self._cparts
+        if drv or root:
+            if len(pat_parts) != len(parts):
+                return False
+            pat_parts = pat_parts[1:]
+        elif len(pat_parts) > len(parts):
+            return False
+        for part, pat in zip(reversed(parts), reversed(pat_parts)):
+            if not fnmatch.fnmatchcase(part, pat):
+                return False
+        return True
+
+
+class PurePosixPath(PurePath):
+    _flavour = _posix_flavour
+    __slots__ = ()
+
+
+class PureWindowsPath(PurePath):
+    _flavour = _windows_flavour
+    __slots__ = ()
+
+
+# Filesystem-accessing classes
+
+
+class Path(PurePath):
+    __slots__ = (
+        '_accessor',
+    )
+
+    def __new__(cls, *args, **kwargs):
+        if cls is Path:
+            cls = WindowsPath if os.name == 'nt' else PosixPath
+        self = cls._from_parts(args, init=False)
+        if not self._flavour.is_supported:
+            raise NotImplementedError("cannot instantiate %r on your system"
+                                      % (cls.__name__,))
+        self._init()
+        return self
+
+    def _init(self,
+              # Private non-constructor arguments
+              template=None,
+              ):
+        if template is not None:
+            self._accessor = template._accessor
+        else:
+            self._accessor = _normal_accessor
+
+    def _make_child_relpath(self, part):
+        # This is an optimization used for dir walking.  `part` must be
+        # a single part relative to this path.
+        parts = self._parts + [part]
+        return self._from_parsed_parts(self._drv, self._root, parts)
+
+    def _opener(self, name, flags, mode=0o666):
+        # A stub for the opener argument to built-in open()
+        return self._accessor.open(self, flags, mode)
+
+    def _raw_open(self, flags, mode=0o777):
+        """
+        Open the file pointed by this path and return a file descriptor,
+        as os.open() does.
+        """
+        return self._accessor.open(self, flags, mode)
+
+    # Public API
+
+    @classmethod
+    def cwd(cls):
+        """Return a new path pointing to the current working directory
+        (as returned by os.getcwd()).
+        """
+        return cls(os.getcwd())
+
+    def iterdir(self):
+        """Iterate over the files in this directory.  Does not yield any
+        result for the special paths '.' and '..'.
+        """
+        for name in self._accessor.listdir(self):
+            if name in ('.', '..'):
+                # Yielding a path object for these makes little sense
+                continue
+            yield self._make_child_relpath(name)
+
+    def glob(self, pattern):
+        """Iterate over this subtree and yield all existing files (of any
+        kind, including directories) matching the given pattern.
+        """
+        pattern = self._flavour.casefold(pattern)
+        drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
+        if drv or root:
+            raise NotImplementedError("Non-relative patterns are unsupported")
+        selector = _make_selector(tuple(pattern_parts))
+        for p in selector.select_from(self):
+            yield p
+
+    def rglob(self, pattern):
+        """Recursively yield all existing files (of any kind, including
+        directories) matching the given pattern, anywhere in this subtree.
+        """
+        pattern = self._flavour.casefold(pattern)
+        drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
+        if drv or root:
+            raise NotImplementedError("Non-relative patterns are unsupported")
+        selector = _make_selector(("**",) + tuple(pattern_parts))
+        for p in selector.select_from(self):
+            yield p
+
+    def absolute(self):
+        """Return an absolute version of this path.  This function works
+        even if the path doesn't point to anything.
+
+        No normalization is done, i.e. all '.' and '..' will be kept along.
+        Use resolve() to get the canonical path to a file.
+        """
+        # XXX untested yet!
+        if self.is_absolute():
+            return self
+        # FIXME this must defer to the specific flavour (and, under Windows,
+        # use nt._getfullpathname())
+        obj = self._from_parts([os.getcwd()] + self._parts, init=False)
+        obj._init(template=self)
+        return obj
+
+    def resolve(self):
+        """
+        Make the path absolute, resolving all symlinks on the way and also
+        normalizing it (for example turning slashes into backslashes under
+        Windows).
+        """
+        s = self._flavour.resolve(self)
+        if s is None:
+            # No symlink resolution => for consistency, raise an error if
+            # the path doesn't exist or is forbidden
+            self.stat()
+            s = str(self.absolute())
+        # Now we have no symlinks in the path, it's safe to normalize it.
+        normed = self._flavour.pathmod.normpath(s)
+        obj = self._from_parts((normed,), init=False)
+        obj._init(template=self)
+        return obj
+
+    def stat(self):
+        """
+        Return the result of the stat() system call on this path, like
+        os.stat() does.
+        """
+        return self._accessor.stat(self)
+
+    def owner(self):
+        """
+        Return the login name of the file owner.
+        """
+        import pwd
+        return pwd.getpwuid(self.stat().st_uid).pw_name
+
+    def group(self):
+        """
+        Return the group name of the file gid.
+        """
+        import grp
+        return grp.getgrgid(self.stat().st_gid).gr_name
+
+    def open(self, mode='r', buffering=-1, encoding=None,
+             errors=None, newline=None):
+        """
+        Open the file pointed by this path and return a file object, as
+        the built-in open() function does.
+        """
+        if sys.version_info >= (3, 3):
+            return io.open(str(self), mode, buffering, encoding, errors, newline,
+                           opener=self._opener)
+        else:
+            return io.open(str(self), mode, buffering, encoding, errors, newline)
+
+    def touch(self, mode=0o666, exist_ok=True):
+        """
+        Create this file with the given access mode, if it doesn't exist.
+        """
+        if exist_ok:
+            # First try to bump modification time
+            # Implementation note: GNU touch uses the UTIME_NOW option of
+            # the utimensat() / futimens() functions.
+            t = time.time()
+            try:
+                self._accessor.utime(self, (t, t))
+            except OSError:
+                # Avoid exception chaining
+                pass
+            else:
+                return
+        flags = os.O_CREAT | os.O_WRONLY
+        if not exist_ok:
+            flags |= os.O_EXCL
+        fd = self._raw_open(flags, mode)
+        os.close(fd)
+
+    def mkdir(self, mode=0o777, parents=False):
+        if not parents:
+            self._accessor.mkdir(self, mode)
+        else:
+            try:
+                self._accessor.mkdir(self, mode)
+            except OSError as e:
+                if e.errno != ENOENT:
+                    raise
+                self.parent.mkdir(parents=True)
+                self._accessor.mkdir(self, mode)
+
+    def chmod(self, mode):
+        """
+        Change the permissions of the path, like os.chmod().
+        """
+        self._accessor.chmod(self, mode)
+
+    def lchmod(self, mode):
+        """
+        Like chmod(), except if the path points to a symlink, the symlink's
+        permissions are changed, rather than its target's.
+        """
+        self._accessor.lchmod(self, mode)
+
+    def unlink(self):
+        """
+        Remove this file or link.
+        If the path is a directory, use rmdir() instead.
+        """
+        self._accessor.unlink(self)
+
+    def rmdir(self):
+        """
+        Remove this directory.  The directory must be empty.
+        """
+        self._accessor.rmdir(self)
+
+    def lstat(self):
+        """
+        Like stat(), except if the path points to a symlink, the symlink's
+        status information is returned, rather than its target's.
+        """
+        return self._accessor.lstat(self)
+
+    def rename(self, target):
+        """
+        Rename this path to the given path.
+        """
+        self._accessor.rename(self, target)
+
+    def replace(self, target):
+        """
+        Rename this path to the given path, clobbering the existing
+        destination if it exists.
+        """
+        if sys.version_info < (3, 3):
+            raise NotImplementedError("replace() is only available "
+                                      "with Python 3.3 and later")
+        self._accessor.replace(self, target)
+
+    def symlink_to(self, target, target_is_directory=False):
+        """
+        Make this path a symlink pointing to the given path.
+        Note the order of arguments (self, target) is the reverse of os.symlink's.
+        """
+        self._accessor.symlink(target, self, target_is_directory)
+
+    # Convenience functions for querying the stat results
+
+    def exists(self):
+        """
+        Whether this path exists.
+        """
+        try:
+            self.stat()
+        except OSError as e:
+            if e.errno != ENOENT:
+                raise
+            return False
+        return True
+
+    def is_dir(self):
+        """
+        Whether this path is a directory.
+        """
+        try:
+            return S_ISDIR(self.stat().st_mode)
+        except OSError as e:
+            if e.errno != ENOENT:
+                raise
+            # Path doesn't exist or is a broken symlink
+            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
+            return False
+
+    def is_file(self):
+        """
+        Whether this path is a regular file (also True for symlinks pointing
+        to regular files).
+        """
+        try:
+            return S_ISREG(self.stat().st_mode)
+        except OSError as e:
+            if e.errno != ENOENT:
+                raise
+            # Path doesn't exist or is a broken symlink
+            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
+            return False
+
+    def is_symlink(self):
+        """
+        Whether this path is a symbolic link.
+        """
+        try:
+            return S_ISLNK(self.lstat().st_mode)
+        except OSError as e:
+            if e.errno != ENOENT:
+                raise
+            # Path doesn't exist
+            return False
+
+    def is_block_device(self):
+        """
+        Whether this path is a block device.
+        """
+        try:
+            return S_ISBLK(self.stat().st_mode)
+        except OSError as e:
+            if e.errno != ENOENT:
+                raise
+            # Path doesn't exist or is a broken symlink
+            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
+            return False
+
+    def is_char_device(self):
+        """
+        Whether this path is a character device.
+        """
+        try:
+            return S_ISCHR(self.stat().st_mode)
+        except OSError as e:
+            if e.errno != ENOENT:
+                raise
+            # Path doesn't exist or is a broken symlink
+            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
+            return False
+
+    def is_fifo(self):
+        """
+        Whether this path is a FIFO.
+        """
+        try:
+            return S_ISFIFO(self.stat().st_mode)
+        except OSError as e:
+            if e.errno != ENOENT:
+                raise
+            # Path doesn't exist or is a broken symlink
+            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
+            return False
+
+    def is_socket(self):
+        """
+        Whether this path is a socket.
+        """
+        try:
+            return S_ISSOCK(self.stat().st_mode)
+        except OSError as e:
+            if e.errno != ENOENT:
+                raise
+            # Path doesn't exist or is a broken symlink
+            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
+            return False
+
+
+class PosixPath(Path, PurePosixPath):
+    __slots__ = ()
+
+class WindowsPath(Path, PureWindowsPath):
+    __slots__ = ()
+
diff --git a/tasks/_vendor/six.py b/tasks/_vendor/six.py
new file mode 100644
index 0000000..190c023
--- /dev/null
+++ b/tasks/_vendor/six.py
@@ -0,0 +1,868 @@
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+# Copyright (c) 2010-2015 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin@python.org>"
+__version__ = "1.10.0"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+PY34 = sys.version_info[0:2] >= (3, 4)
+
+if PY3:
+    string_types = str,
+    integer_types = int,
+    class_types = type,
+    text_type = str
+    binary_type = bytes
+
+    MAXSIZE = sys.maxsize
+else:
+    string_types = basestring,
+    integer_types = (int, long)
+    class_types = (type, types.ClassType)
+    text_type = unicode
+    binary_type = str
+
+    if sys.platform.startswith("java"):
+        # Jython always uses 32 bits.
+        MAXSIZE = int((1 << 31) - 1)
+    else:
+        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+        class X(object):
+
+            def __len__(self):
+                return 1 << 31
+        try:
+            len(X())
+        except OverflowError:
+            # 32-bit
+            MAXSIZE = int((1 << 31) - 1)
+        else:
+            # 64-bit
+            MAXSIZE = int((1 << 63) - 1)
+        del X
+
+
+def _add_doc(func, doc):
+    """Add documentation to a function."""
+    func.__doc__ = doc
+
+
+def _import_module(name):
+    """Import module, returning the module after the last dot."""
+    __import__(name)
+    return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+    def __init__(self, name):
+        self.name = name
+
+    def __get__(self, obj, tp):
+        result = self._resolve()
+        setattr(obj, self.name, result)  # Invokes __set__.
+        try:
+            # This is a bit ugly, but it avoids running this again by
+            # removing this descriptor.
+            delattr(obj.__class__, self.name)
+        except AttributeError:
+            pass
+        return result
+
+
+class MovedModule(_LazyDescr):
+
+    def __init__(self, name, old, new=None):
+        super(MovedModule, self).__init__(name)
+        if PY3:
+            if new is None:
+                new = name
+            self.mod = new
+        else:
+            self.mod = old
+
+    def _resolve(self):
+        return _import_module(self.mod)
+
+    def __getattr__(self, attr):
+        _module = self._resolve()
+        value = getattr(_module, attr)
+        setattr(self, attr, value)
+        return value
+
+
+class _LazyModule(types.ModuleType):
+
+    def __init__(self, name):
+        super(_LazyModule, self).__init__(name)
+        self.__doc__ = self.__class__.__doc__
+
+    def __dir__(self):
+        attrs = ["__doc__", "__name__"]
+        attrs += [attr.name for attr in self._moved_attributes]
+        return attrs
+
+    # Subclasses should override this
+    _moved_attributes = []
+
+
+class MovedAttribute(_LazyDescr):
+
+    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+        super(MovedAttribute, self).__init__(name)
+        if PY3:
+            if new_mod is None:
+                new_mod = name
+            self.mod = new_mod
+            if new_attr is None:
+                if old_attr is None:
+                    new_attr = name
+                else:
+                    new_attr = old_attr
+            self.attr = new_attr
+        else:
+            self.mod = old_mod
+            if old_attr is None:
+                old_attr = name
+            self.attr = old_attr
+
+    def _resolve(self):
+        module = _import_module(self.mod)
+        return getattr(module, self.attr)
+
+
+class _SixMetaPathImporter(object):
+
+    """
+    A meta path importer to import six.moves and its submodules.
+
+    This class implements a PEP302 finder and loader. It should be compatible
+    with Python 2.5 and all existing versions of Python3
+    """
+
+    def __init__(self, six_module_name):
+        self.name = six_module_name
+        self.known_modules = {}
+
+    def _add_module(self, mod, *fullnames):
+        for fullname in fullnames:
+            self.known_modules[self.name + "." + fullname] = mod
+
+    def _get_module(self, fullname):
+        return self.known_modules[self.name + "." + fullname]
+
+    def find_module(self, fullname, path=None):
+        if fullname in self.known_modules:
+            return self
+        return None
+
+    def __get_module(self, fullname):
+        try:
+            return self.known_modules[fullname]
+        except KeyError:
+            raise ImportError("This loader does not know module " + fullname)
+
+    def load_module(self, fullname):
+        try:
+            # in case of a reload
+            return sys.modules[fullname]
+        except KeyError:
+            pass
+        mod = self.__get_module(fullname)
+        if isinstance(mod, MovedModule):
+            mod = mod._resolve()
+        else:
+            mod.__loader__ = self
+        sys.modules[fullname] = mod
+        return mod
+
+    def is_package(self, fullname):
+        """
+        Return true, if the named module is a package.
+
+        We need this method to get correct spec objects with
+        Python 3.4 (see PEP451)
+        """
+        return hasattr(self.__get_module(fullname), "__path__")
+
+    def get_code(self, fullname):
+        """Return None
+
+        Required, if is_package is implemented"""
+        self.__get_module(fullname)  # eventually raises ImportError
+        return None
+    get_source = get_code  # same as get_code
+
+_importer = _SixMetaPathImporter(__name__)
+
+
+class _MovedItems(_LazyModule):
+
+    """Lazy loading of moved objects"""
+    __path__ = []  # mark as package
+
+
+_moved_attributes = [
+    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
+    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+    MovedAttribute("intern", "__builtin__", "sys"),
+    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+    MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
+    MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+    MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
+    MovedAttribute("reduce", "__builtin__", "functools"),
+    MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
+    MovedAttribute("StringIO", "StringIO", "io"),
+    MovedAttribute("UserDict", "UserDict", "collections"),
+    MovedAttribute("UserList", "UserList", "collections"),
+    MovedAttribute("UserString", "UserString", "collections"),
+    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
+    MovedModule("builtins", "__builtin__"),
+    MovedModule("configparser", "ConfigParser"),
+    MovedModule("copyreg", "copy_reg"),
+    MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+    MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
+    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+    MovedModule("http_cookies", "Cookie", "http.cookies"),
+    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+    MovedModule("html_parser", "HTMLParser", "html.parser"),
+    MovedModule("http_client", "httplib", "http.client"),
+    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+    MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
+    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+    MovedModule("cPickle", "cPickle", "pickle"),
+    MovedModule("queue", "Queue"),
+    MovedModule("reprlib", "repr"),
+    MovedModule("socketserver", "SocketServer"),
+    MovedModule("_thread", "thread", "_thread"),
+    MovedModule("tkinter", "Tkinter"),
+    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+    MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
+    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+    MovedModule("tkinter_colorchooser", "tkColorChooser",
+                "tkinter.colorchooser"),
+    MovedModule("tkinter_commondialog", "tkCommonDialog",
+                "tkinter.commondialog"),
+    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+                "tkinter.simpledialog"),
+    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+    MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
+    MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
+]
+# Add windows specific modules.
+if sys.platform == "win32":
+    _moved_attributes += [
+        MovedModule("winreg", "_winreg"),
+    ]
+
+for attr in _moved_attributes:
+    setattr(_MovedItems, attr.name, attr)
+    if isinstance(attr, MovedModule):
+        _importer._add_module(attr, "moves." + attr.name)
+del attr
+
+_MovedItems._moved_attributes = _moved_attributes
+
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
+
+
+class Module_six_moves_urllib_parse(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+    MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
+    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+    MovedAttribute("quote", "urllib", "urllib.parse"),
+    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+    MovedAttribute("unquote", "urllib", "urllib.parse"),
+    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+    MovedAttribute("urlencode", "urllib", "urllib.parse"),
+    MovedAttribute("splitquery", "urllib", "urllib.parse"),
+    MovedAttribute("splittag", "urllib", "urllib.parse"),
+    MovedAttribute("splituser", "urllib", "urllib.parse"),
+    MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+    setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+                      "moves.urllib_parse", "moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+    MovedAttribute("URLError", "urllib2", "urllib.error"),
+    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+    setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+                      "moves.urllib_error", "moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+    MovedAttribute("urlopen", "urllib2", "urllib.request"),
+    MovedAttribute("install_opener", "urllib2", "urllib.request"),
+    MovedAttribute("build_opener", "urllib2", "urllib.request"),
+    MovedAttribute("pathname2url", "urllib", "urllib.request"),
+    MovedAttribute("url2pathname", "urllib", "urllib.request"),
+    MovedAttribute("getproxies", "urllib", "urllib.request"),
+    MovedAttribute("Request", "urllib2", "urllib.request"),
+    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+    MovedAttribute("URLopener", "urllib", "urllib.request"),
+    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+    MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+    setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+                      "moves.urllib_request", "moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+    MovedAttribute("addbase", "urllib", "urllib.response"),
+    MovedAttribute("addclosehook", "urllib", "urllib.response"),
+    MovedAttribute("addinfo", "urllib", "urllib.response"),
+    MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+    setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+                      "moves.urllib_response", "moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+                      "moves.urllib_robotparser", "moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+
+    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+    __path__ = []  # mark as package
+    parse = _importer._get_module("moves.urllib_parse")
+    error = _importer._get_module("moves.urllib_error")
+    request = _importer._get_module("moves.urllib_request")
+    response = _importer._get_module("moves.urllib_response")
+    robotparser = _importer._get_module("moves.urllib_robotparser")
+
+    def __dir__(self):
+        return ['parse', 'error', 'request', 'response', 'robotparser']
+
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
+                      "moves.urllib")
+
+
+def add_move(move):
+    """Add an item to six.moves."""
+    setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+    """Remove item from six.moves."""
+    try:
+        delattr(_MovedItems, name)
+    except AttributeError:
+        try:
+            del moves.__dict__[name]
+        except KeyError:
+            raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+    _meth_func = "__func__"
+    _meth_self = "__self__"
+
+    _func_closure = "__closure__"
+    _func_code = "__code__"
+    _func_defaults = "__defaults__"
+    _func_globals = "__globals__"
+else:
+    _meth_func = "im_func"
+    _meth_self = "im_self"
+
+    _func_closure = "func_closure"
+    _func_code = "func_code"
+    _func_defaults = "func_defaults"
+    _func_globals = "func_globals"
+
+
+try:
+    advance_iterator = next
+except NameError:
+    def advance_iterator(it):
+        return it.next()
+next = advance_iterator
+
+
+try:
+    callable = callable
+except NameError:
+    def callable(obj):
+        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+    def get_unbound_function(unbound):
+        return unbound
+
+    create_bound_method = types.MethodType
+
+    def create_unbound_method(func, cls):
+        return func
+
+    Iterator = object
+else:
+    def get_unbound_function(unbound):
+        return unbound.im_func
+
+    def create_bound_method(func, obj):
+        return types.MethodType(func, obj, obj.__class__)
+
+    def create_unbound_method(func, cls):
+        return types.MethodType(func, None, cls)
+
+    class Iterator(object):
+
+        def next(self):
+            return type(self).__next__(self)
+
+    callable = callable
+_add_doc(get_unbound_function,
+         """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+if PY3:
+    def iterkeys(d, **kw):
+        return iter(d.keys(**kw))
+
+    def itervalues(d, **kw):
+        return iter(d.values(**kw))
+
+    def iteritems(d, **kw):
+        return iter(d.items(**kw))
+
+    def iterlists(d, **kw):
+        return iter(d.lists(**kw))
+
+    viewkeys = operator.methodcaller("keys")
+
+    viewvalues = operator.methodcaller("values")
+
+    viewitems = operator.methodcaller("items")
+else:
+    def iterkeys(d, **kw):
+        return d.iterkeys(**kw)
+
+    def itervalues(d, **kw):
+        return d.itervalues(**kw)
+
+    def iteritems(d, **kw):
+        return d.iteritems(**kw)
+
+    def iterlists(d, **kw):
+        return d.iterlists(**kw)
+
+    viewkeys = operator.methodcaller("viewkeys")
+
+    viewvalues = operator.methodcaller("viewvalues")
+
+    viewitems = operator.methodcaller("viewitems")
+
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems,
+         "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(iterlists,
+         "Return an iterator over the (key, [values]) pairs of a dictionary.")
+
+
+if PY3:
+    def b(s):
+        return s.encode("latin-1")
+
+    def u(s):
+        return s
+    unichr = chr
+    import struct
+    int2byte = struct.Struct(">B").pack
+    del struct
+    byte2int = operator.itemgetter(0)
+    indexbytes = operator.getitem
+    iterbytes = iter
+    import io
+    StringIO = io.StringIO
+    BytesIO = io.BytesIO
+    _assertCountEqual = "assertCountEqual"
+    if sys.version_info[1] <= 1:
+        _assertRaisesRegex = "assertRaisesRegexp"
+        _assertRegex = "assertRegexpMatches"
+    else:
+        _assertRaisesRegex = "assertRaisesRegex"
+        _assertRegex = "assertRegex"
+else:
+    def b(s):
+        return s
+    # Workaround for standalone backslash
+
+    def u(s):
+        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
+    unichr = unichr
+    int2byte = chr
+
+    def byte2int(bs):
+        return ord(bs[0])
+
+    def indexbytes(buf, i):
+        return ord(buf[i])
+    iterbytes = functools.partial(itertools.imap, ord)
+    import StringIO
+    StringIO = BytesIO = StringIO.StringIO
+    _assertCountEqual = "assertItemsEqual"
+    _assertRaisesRegex = "assertRaisesRegexp"
+    _assertRegex = "assertRegexpMatches"
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+def assertCountEqual(self, *args, **kwargs):
+    return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
+def assertRaisesRegex(self, *args, **kwargs):
+    return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
+def assertRegex(self, *args, **kwargs):
+    return getattr(self, _assertRegex)(*args, **kwargs)
+
+
+if PY3:
+    exec_ = getattr(moves.builtins, "exec")
+
+    def reraise(tp, value, tb=None):
+        if value is None:
+            value = tp()
+        if value.__traceback__ is not tb:
+            raise value.with_traceback(tb)
+        raise value
+
+else:
+    def exec_(_code_, _globs_=None, _locs_=None):
+        """Execute code in a namespace."""
+        if _globs_ is None:
+            frame = sys._getframe(1)
+            _globs_ = frame.f_globals
+            if _locs_ is None:
+                _locs_ = frame.f_locals
+            del frame
+        elif _locs_ is None:
+            _locs_ = _globs_
+        exec("""exec _code_ in _globs_, _locs_""")
+
+    exec_("""def reraise(tp, value, tb=None):
+    raise tp, value, tb
+""")
+
+
+if sys.version_info[:2] == (3, 2):
+    exec_("""def raise_from(value, from_value):
+    if from_value is None:
+        raise value
+    raise value from from_value
+""")
+elif sys.version_info[:2] > (3, 2):
+    exec_("""def raise_from(value, from_value):
+    raise value from from_value
+""")
+else:
+    def raise_from(value, from_value):
+        raise value
+
+
+print_ = getattr(moves.builtins, "print", None)
+if print_ is None:
+    def print_(*args, **kwargs):
+        """The new-style print function for Python 2.4 and 2.5."""
+        fp = kwargs.pop("file", sys.stdout)
+        if fp is None:
+            return
+
+        def write(data):
+            if not isinstance(data, basestring):
+                data = str(data)
+            # If the file has an encoding, encode unicode with it.
+            if (isinstance(fp, file) and
+                    isinstance(data, unicode) and
+                    fp.encoding is not None):
+                errors = getattr(fp, "errors", None)
+                if errors is None:
+                    errors = "strict"
+                data = data.encode(fp.encoding, errors)
+            fp.write(data)
+        want_unicode = False
+        sep = kwargs.pop("sep", None)
+        if sep is not None:
+            if isinstance(sep, unicode):
+                want_unicode = True
+            elif not isinstance(sep, str):
+                raise TypeError("sep must be None or a string")
+        end = kwargs.pop("end", None)
+        if end is not None:
+            if isinstance(end, unicode):
+                want_unicode = True
+            elif not isinstance(end, str):
+                raise TypeError("end must be None or a string")
+        if kwargs:
+            raise TypeError("invalid keyword arguments to print()")
+        if not want_unicode:
+            for arg in args:
+                if isinstance(arg, unicode):
+                    want_unicode = True
+                    break
+        if want_unicode:
+            newline = unicode("\n")
+            space = unicode(" ")
+        else:
+            newline = "\n"
+            space = " "
+        if sep is None:
+            sep = space
+        if end is None:
+            end = newline
+        for i, arg in enumerate(args):
+            if i:
+                write(sep)
+            write(arg)
+        write(end)
+if sys.version_info[:2] < (3, 3):
+    _print = print_
+
+    def print_(*args, **kwargs):
+        fp = kwargs.get("file", sys.stdout)
+        flush = kwargs.pop("flush", False)
+        _print(*args, **kwargs)
+        if flush and fp is not None:
+            fp.flush()
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
+              updated=functools.WRAPPER_UPDATES):
+        def wrapper(f):
+            f = functools.wraps(wrapped, assigned, updated)(f)
+            f.__wrapped__ = wrapped
+            return f
+        return wrapper
+else:
+    wraps = functools.wraps
+
+
+def with_metaclass(meta, *bases):
+    """Create a base class with a metaclass."""
+    # This requires a bit of explanation: the basic idea is to make a dummy
+    # metaclass for one level of class instantiation that replaces itself with
+    # the actual metaclass.
+    class metaclass(meta):
+
+        def __new__(cls, name, this_bases, d):
+            return meta(name, bases, d)
+    return type.__new__(metaclass, 'temporary_class', (), {})
+
+
+def add_metaclass(metaclass):
+    """Class decorator for creating a class with a metaclass."""
+    def wrapper(cls):
+        orig_vars = cls.__dict__.copy()
+        slots = orig_vars.get('__slots__')
+        if slots is not None:
+            if isinstance(slots, str):
+                slots = [slots]
+            for slots_var in slots:
+                orig_vars.pop(slots_var)
+        orig_vars.pop('__dict__', None)
+        orig_vars.pop('__weakref__', None)
+        return metaclass(cls.__name__, cls.__bases__, orig_vars)
+    return wrapper
+
+
+def python_2_unicode_compatible(klass):
+    """
+    A decorator that defines __unicode__ and __str__ methods under Python 2.
+    Under Python 3 it does nothing.
+
+    To support Python 2 and 3 with a single code base, define a __str__ method
+    returning text and apply this decorator to the class.
+    """
+    if PY2:
+        if '__str__' not in klass.__dict__:
+            raise ValueError("@python_2_unicode_compatible cannot be applied "
+                             "to %s because it doesn't define __str__()." %
+                             klass.__name__)
+        klass.__unicode__ = klass.__str__
+        klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
+    return klass
+
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = []  # required for PEP 302 and PEP 451
+__package__ = __name__  # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+    __spec__.submodule_search_locations = []  # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+    for i, importer in enumerate(sys.meta_path):
+        # Here's some real nastiness: Another "instance" of the six module might
+        # be floating around. Therefore, we can't use isinstance() to check for
+        # the six meta path importer, since the other six instance will have
+        # inserted an importer with different class.
+        if (type(importer).__name__ == "_SixMetaPathImporter" and
+                importer.name == __name__):
+            del sys.meta_path[i]
+            break
+    del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
diff --git a/tasks/docs.py b/tasks/docs.py
new file mode 100644
index 0000000..3360279
--- /dev/null
+++ b/tasks/docs.py
@@ -0,0 +1,198 @@
+# -*- coding: UTF-8 -*-
+"""
+Provides tasks to build documentation with sphinx, etc.
+"""
+
+from __future__ import absolute_import, print_function
+import os
+import sys
+from invoke import task, Collection
+from invoke.util import cd
+from path import Path
+
+# -- TASK-LIBRARY:
+from ._tasklet_cleanup import cleanup_tasks, cleanup_dirs
+
+
+# -----------------------------------------------------------------------------
+# CONSTANTS:
+# -----------------------------------------------------------------------------
+SPHINX_LANGUAGE_DEFAULT = os.environ.get("SPHINX_LANGUAGE", "en")
+
+
+# -----------------------------------------------------------------------------
+# UTILITIES:
+# -----------------------------------------------------------------------------
+def _sphinxdoc_get_language(ctx, language=None):
+    language = language or ctx.config.sphinx.language or SPHINX_LANGUAGE_DEFAULT
+    return language
+
+
+def _sphinxdoc_get_destdir(ctx, builder, language=None):
+    if builder == "gettext":
+        # -- CASE: not LANGUAGE-SPECIFIC
+        destdir = Path(ctx.config.sphinx.destdir or "build")/builder
+    else:
+        # -- CASE: LANGUAGE-SPECIFIC:
+        language = _sphinxdoc_get_language(ctx, language)
+        destdir = Path(ctx.config.sphinx.destdir or "build")/builder/language
+    return destdir
+
+
+# -----------------------------------------------------------------------------
+# TASKS:
+# -----------------------------------------------------------------------------
+@task
+def clean(ctx, dry_run=False):
+    """Cleanup generated document artifacts."""
+    basedir = ctx.sphinx.destdir or "build/docs"
+    cleanup_dirs([basedir], dry_run=dry_run)
+
+
+@task(help={
+    "builder": "Builder to use (html, ...)",
+    "language": "Language to use (en, ...)",
+    "options": "Additional options for sphinx-build",
+})
+def build(ctx, builder="html", language=None, options=""):
+    """Build docs with sphinx-build"""
+    language = _sphinxdoc_get_language(ctx, language)
+    sourcedir = ctx.config.sphinx.sourcedir
+    destdir = _sphinxdoc_get_destdir(ctx, builder, language=language)
+    destdir = destdir.abspath()
+    with cd(sourcedir):
+        destdir_relative = Path(".").relpathto(destdir)
+        command = "sphinx-build {opts} -b {builder} -D language={language} {sourcedir} {destdir}" \
+                    .format(builder=builder, sourcedir=".",
+                            destdir=destdir_relative,
+                            language=language,
+                            opts=options)
+        ctx.run(command)
+
+@task(help={
+    "builder": "Builder to use (html, ...)",
+    "language": "Language to use (en, ...)",
+    "options": "Additional options for sphinx-build",
+})
+def rebuild(ctx, builder="html", language=None, options=""):
+    """Rebuild the docs (steps: clean, then build).
+    BUGFIX: forward the caller's language (was hard-coded to None).
+    """
+    clean(ctx)
+    build(ctx, builder=builder, language=language, options=options)
+
+@task
+def linkcheck(ctx):
+    """Check if all links are correct."""
+    build(ctx, builder="linkcheck")
+
+@task(help={"language": "Language to use (en, ...)"})
+def browse(ctx, language=None):
+    """Open documentation in web browser (builds it first if missing)."""
+    output_dir = _sphinxdoc_get_destdir(ctx, "html", language=language)
+    page_html = Path(output_dir)/"index.html"
+    if not page_html.exists():
+        # BUGFIX: forward language so the requested translation is built;
+        # previously the default language was built and the assert failed.
+        build(ctx, builder="html", language=language)
+    assert page_html.exists()
+    open_cmd = "open"   # -- WORKS ON: MACOSX
+    if sys.platform.startswith("win"):
+        open_cmd = "start"
+    ctx.run("{open} {page_html}".format(open=open_cmd, page_html=page_html))
+    # ctx.run('python -m webbrowser -t {page_html}'.format(page_html=page_html))
+    # -- DISABLED:
+    # import webbrowser
+
+
+@task(help={
+    "dest": "Destination directory to save docs",
+    "format": "Format/Builder to use (html, ...)",
+    "language": "Language to use (en, ...)",
+})
+# pylint: disable=redefined-builtin
+def save(ctx, dest="docs.html", format="html", language=None):
+    """Save/update docs under destination directory."""
+    print("STEP: Generate docs in HTML format")
+    build(ctx, builder=format, language=language)
+
+    print("STEP: Save docs under %s/" % dest)
+    source_dir = Path(_sphinxdoc_get_destdir(ctx, format, language=language))
+    Path(dest).rmtree_p()
+    source_dir.copytree(dest)
+
+    # -- POST-PROCESSING: Polish up.
+    for part in [".buildinfo", ".doctrees"]:
+        partpath = Path(dest)/part
+        if partpath.isdir():
+            partpath.rmtree_p()
+        elif partpath.exists():
+            partpath.remove_p()
+
+
+@task(help={
+    "language": 'Language to use, like "en" (default: "all" to build all).',
+})
+def update_translation(ctx, language="all"):
+    """Update sphinx-doc translation(s) messages from the "English" docs.
+
+    * Generates gettext *.po files in "build/docs/gettext/" directory
+    * Updates/generates gettext *.po per language in "docs/LOCALE/{language}/"
+
+    .. note:: Afterwards, the missing message translations can be filled in.
+
+    :param language: Indicate which language messages to update (or "all").
+
+    REQUIRES:
+
+    * sphinx
+    * sphinx-intl >= 0.9
+
+    .. seealso:: https://github.com/sphinx-doc/sphinx-intl
+    """
+    if language == "all":
+        # -- CASE: Process/update all support languages (translations).
+        DEFAULT_LANGUAGES = os.environ.get("SPHINXINTL_LANGUAGE", None)
+        if DEFAULT_LANGUAGES:
+            # -- EXAMPLE: SPHINXINTL_LANGUAGE="de,ja"
+            DEFAULT_LANGUAGES = DEFAULT_LANGUAGES.split(",")
+        languages = ctx.config.sphinx.languages or DEFAULT_LANGUAGES
+    else:
+        # -- CASE: Process only one language (translation use case).
+        languages = [language]
+
+    # -- STEP: Generate *.po/*.pot files w/ sphinx-build -b gettext
+    build(ctx, builder="gettext")
+
+    # -- STEP: Update *.po/*.pot files w/ sphinx-intl
+    if languages:
+        gettext_build_dir = _sphinxdoc_get_destdir(ctx, "gettext").abspath()
+        docs_sourcedir = ctx.config.sphinx.sourcedir
+        languages_opts = "-l "+ " -l ".join(languages)
+        with ctx.cd(docs_sourcedir):
+            ctx.run("sphinx-intl update -p {gettext_dir} {languages}".format(
+                    gettext_dir=gettext_build_dir.relpath(docs_sourcedir),
+                    languages=languages_opts))
+    else:
+        print("OOPS: No languages specified (use: SPHINXINTL_LANGUAGE=...)")
+
+
+# -----------------------------------------------------------------------------
+# TASK CONFIGURATION:
+# -----------------------------------------------------------------------------
+namespace = Collection(clean, rebuild, linkcheck, browse, save, update_translation)
+namespace.add_task(build, default=True)
+namespace.configure({
+    "sphinx": {
+        # -- FOR TASKS: docs.build, docs.rebuild, docs.clean, ...
+        "language": SPHINX_LANGUAGE_DEFAULT,
+        "sourcedir": "docs",
+        "destdir": "build/docs",
+        # -- FOR TASK: docs.update_translation
+        "languages": None,  # -- List of language translations, like: de, ja, ...
+    }
+})
+
+# -- ADD CLEANUP TASK:
+cleanup_tasks.add_task(clean, "clean_docs")
+cleanup_tasks.configure(namespace.configuration())
diff --git a/tasks/py.requirements.txt b/tasks/py.requirements.txt
new file mode 100644
index 0000000..c34d1cb
--- /dev/null
+++ b/tasks/py.requirements.txt
@@ -0,0 +1,19 @@
+# ============================================================================
+# INVOKE PYTHON PACKAGE REQUIREMENTS: For tasks
+# ============================================================================
+# DESCRIPTION:
+#    pip install -r <THIS_FILE>
+#
+# SEE ALSO:
+#  * http://www.pip-installer.org/
+# ============================================================================
+
+invoke >= 1.2.0
+path.py >= 11.5.0
+pycmd
+six >= 1.12.0
+
+# -- PYTHON2 BACKPORTS:
+pathlib;    python_version <= '3.4'
+backports.shutil_which; python_version <= '3.3'
+
diff --git a/tasks/release.py b/tasks/release.py
new file mode 100644
index 0000000..bea347e
--- /dev/null
+++ b/tasks/release.py
@@ -0,0 +1,226 @@
+# -*- coding: UTF-8 -*-
+"""
+Tasks for releasing this project.
+
+Normal steps::
+
+
+    python setup.py sdist bdist_wheel
+
+    twine register dist/{project}-{version}.tar.gz
+    twine upload   dist/*
+
+    twine upload  --skip-existing dist/*
+
+    python setup.py upload
+    # -- DEPRECATED: No longer supported -> Use RTD instead
+    # -- DEPRECATED: python setup.py upload_docs
+
+pypi repositories:
+
+    * https://pypi.python.org/pypi
+    * https://testpypi.python.org/pypi  (not working anymore)
+    * https://test.pypi.org/legacy/     (not working anymore)
+
+Configuration file for pypi repositories:
+
+.. code-block:: ini
+
+    # -- FILE: $HOME/.pypirc
+    [distutils]
+    index-servers =
+        pypi
+        testpypi
+
+    [pypi]
+    # DEPRECATED: repository = https://pypi.python.org/pypi
+    username = __USERNAME_HERE__
+    password:
+
+    [testpypi]
+    # DEPRECATED: repository = https://test.pypi.org/legacy
+    username = __USERNAME_HERE__
+    password:
+
+.. seealso::
+
+    * https://packaging.python.org/
+    * https://packaging.python.org/guides/
+    * https://packaging.python.org/tutorials/distributing-packages/
+"""
+
+from __future__ import absolute_import, print_function
+from invoke import Collection, task
+from ._tasklet_cleanup import path_glob
+from ._dry_run import DryRunContext
+
+
+# -----------------------------------------------------------------------------
+# TASKS:
+# -----------------------------------------------------------------------------
+@task
+def checklist(ctx=None):    # pylint: disable=unused-argument
+    """Checklist for releasing this project."""
+    checklist_text = """PRE-RELEASE CHECKLIST:
+[ ]  Everything is checked in
+[ ]  All tests pass w/ tox
+
+RELEASE CHECKLIST:
+[{x1}]  Bump version to new-version and tag repository (via bump_version)
+[{x2}]  Build packages (sdist, bdist_wheel via prepare)
+[{x3}]  Register and upload packages to testpypi repository (first)
+[{x4}]    Verify release is OK and packages from testpypi are usable
+[{x5}]  Register and upload packages to pypi repository
+[{x6}]  Push last changes to Github repository
+
+POST-RELEASE CHECKLIST:
+[ ]  Bump version to new-develop-version (via bump_version)
+[ ]  Adapt CHANGES (if necessary)
+[ ]  Commit latest changes to Github repository
+"""
+    steps = dict(x1=None, x2=None, x3=None, x4=None, x5=None, x6=None)
+    yesno_map = {True: "x", False: "_", None: " "}
+    answers = {name: yesno_map[value]
+               for name, value in steps.items()}
+    print(checklist_text.format(**answers))
+
+
+@task(name="bump_version")
+def bump_version(ctx, new_version, version_part=None, dry_run=False):
+    """Bump version (to prepare a new release)."""
+    version_part = version_part or "minor"
+    if dry_run:
+        ctx = DryRunContext(ctx)
+    ctx.run("bumpversion --new-version={} {}".format(new_version,
+                                                     version_part))
+
+
+@task(name="build", aliases=["build_packages"])
+def build_packages(ctx, hide=False):
+    """Build packages for this release."""
+    print("build_packages:")
+    ctx.run("python setup.py sdist bdist_wheel", echo=True, hide=hide)
+
+
+@task
+def prepare(ctx, new_version=None, version_part=None, hide=True,
+            dry_run=False):
+    """Prepare the release: bump version, build packages, ..."""
+    if new_version is not None:
+        bump_version(ctx, new_version, version_part=version_part,
+                     dry_run=dry_run)
+    build_packages(ctx, hide=hide)
+    packages = ensure_packages_exist(ctx, check_only=True)
+    print_packages(packages)
+
+# -- NOT-NEEDED:
+# @task(name="register")
+# def register_packages(ctx, repo=None, dry_run=False):
+#     """Register release (packages) in artifact-store/repository."""
+#     original_ctx = ctx
+#     if repo is None:
+#         repo = ctx.project.repo or "pypi"
+#     if dry_run:
+#         ctx = DryRunContext(ctx)
+
+#     packages = ensure_packages_exist(original_ctx)
+#     print_packages(packages)
+#     for artifact in packages:
+#         ctx.run("twine register --repository={repo} {artifact}".format(
+#                 artifact=artifact, repo=repo))
+
+
+@task
+def upload(ctx, repo=None, repo_url=None, dry_run=False,
+           skip_existing=False, verbose=False):
+    """Upload release packages to repository (artifact-store)."""
+    if repo is None:
+        repo = ctx.project.repo or "pypi"
+    if repo_url is None:
+        repo_url = ctx.project.repo_url or None
+    original_ctx = ctx
+    if dry_run:
+        ctx = DryRunContext(ctx)
+
+    # -- OPTIONS:
+    opts = []
+    if repo_url:
+        opts.append("--repository-url={0}".format(repo_url))
+    elif repo:
+        opts.append("--repository={0}".format(repo))
+    if skip_existing:
+        opts.append("--skip-existing")
+    if verbose:
+        opts.append("--verbose")
+
+    packages = ensure_packages_exist(original_ctx)
+    print_packages(packages)
+    ctx.run("twine upload {opts} dist/*".format(opts=" ".join(opts)))
+
+    # ctx.run("twine upload --repository={repo} dist/*".format(repo=repo))
+    # 2018-05-05 WORK-AROUND for new https://pypi.org/:
+    #   twine upload --repository-url=https://upload.pypi.org/legacy /dist/*
+    # NOT-WORKING: repo_url = "https://upload.pypi.org/simple/"
+    #
+    # ctx.run("twine upload --repository-url={repo_url} {opts} dist/*".format(
+    #    repo_url=repo_url, opts=" ".join(opts)))
+    # ctx.run("twine upload --repository={repo} {opts} dist/*".format(
+    #         repo=repo, opts=" ".join(opts)))
+
+
+# -- DEPRECATED: Use RTD instead
+# @task(name="upload_docs")
+# def upload_docs(ctx, repo=None, dry_run=False):
+#     """Upload and publish docs.
+#
+#     NOTE: Docs are built first.
+#     """
+#     if repo is None:
+#         repo = ctx.project.repo or "pypi"
+#     if dry_run:
+#         ctx = DryRunContext(ctx)
+#
+#     ctx.run("python setup.py upload_docs")
+#
+# -----------------------------------------------------------------------------
+# TASK HELPERS:
+# -----------------------------------------------------------------------------
+def print_packages(packages):
+    print("PACKAGES[%d]:" % len(packages))
+    for package in packages:
+        package_size = package.stat().st_size
+        package_time = package.stat().st_mtime
+        print("  - %s  (size=%s)" % (package, package_size))
+
+
+def ensure_packages_exist(ctx, pattern=None, check_only=False):
+    if pattern is None:
+        project_name = ctx.project.name
+        project_prefix = project_name.replace("_", "-").split("-")[0]
+        pattern = "dist/%s*" % project_prefix
+
+    packages = list(path_glob(pattern, current_dir="."))
+    if not packages:
+        if check_only:
+            message = "No artifacts found: pattern=%s" % pattern
+            raise RuntimeError(message)
+        else:
+            # -- RECURSIVE-SELF-CALL: Once
+            print("NO-PACKAGES-FOUND: Build packages first ...")
+            build_packages(ctx, hide=True)
+            packages = ensure_packages_exist(ctx, pattern,
+                                             check_only=True)
+    return packages
+
+
+# -----------------------------------------------------------------------------
+# TASK CONFIGURATION:
+# -----------------------------------------------------------------------------
+# DISABLED: register_packages
+namespace = Collection(bump_version, checklist, prepare, build_packages, upload)
+namespace.configure({
+    "project": {
+        "repo": "pypi",
+        "repo_url": None,
+    }
+})
diff --git a/tasks/test.py b/tasks/test.py
new file mode 100644
index 0000000..f1c4991
--- /dev/null
+++ b/tasks/test.py
@@ -0,0 +1,207 @@
+# -*- coding: UTF-8 -*-
+"""
+Invoke test tasks.
+"""
+
+from __future__ import print_function
+import os.path
+import sys
+from invoke import task, Collection
+
+# -- TASK-LIBRARY:
+from ._tasklet_cleanup import cleanup_tasks, cleanup_dirs, cleanup_files
+
+
+# ---------------------------------------------------------------------------
+# CONSTANTS:
+# ---------------------------------------------------------------------------
+# -- TOGGLE: behave-based test runs (disabled here — presumably no feature
+#    tests exist in this project; confirm before enabling).
+USE_BEHAVE = False
+
+
+# ---------------------------------------------------------------------------
+# TASKS
+# ---------------------------------------------------------------------------
+@task(name="all", help={
+    "args": "Command line args for test run.",
+})
+def test_all(ctx, args="", options=""):
+    """Run all tests (default).
+
+    Splits args by scope prefix: pytest gets args matching ctx.pytest.scopes,
+    behave gets args matching ctx.behave_test.scopes (only if USE_BEHAVE).
+    With no args at all, every enabled runner is executed.
+    """
+    pytest_args = select_by_prefix(args, ctx.pytest.scopes)
+    behave_args = None
+    if USE_BEHAVE:
+        behave_args = select_by_prefix(args, ctx.behave_test.scopes)
+    # -- A runner executes if no args were given, or some args match its scope.
+    pytest_should_run = not args or (args and pytest_args)
+    behave_should_run = not args or (args and behave_args)
+    if pytest_should_run:
+        pytest(ctx, pytest_args, options=options)
+    if behave_should_run and USE_BEHAVE:
+        behave(ctx, behave_args, options=options)
+
+
+@task
+def clean(ctx, dry_run=False):
+    """Cleanup (temporary) test artifacts.
+
+    Directories/files to remove come from ctx.test.clean.* configuration.
+    With dry_run=True, only shows what would be removed.
+    """
+    directories = ctx.test.clean.directories or []
+    files = ctx.test.clean.files or []
+    cleanup_dirs(directories, dry_run=dry_run)
+    cleanup_files(files, dry_run=dry_run)
+
+
+@task(name="unit")
+def unittest(ctx, args="", options=""):
+    """Run unit tests (alias that delegates to the pytest task)."""
+    pytest(ctx, args, options)
+
+
+@task
+def pytest(ctx, args="", options=""):
+    """Run unit tests with pytest.
+
+    Empty args/options fall back to the ctx.pytest.* configuration values.
+    """
+    args = args or ctx.pytest.args
+    options = options or ctx.pytest.options
+    ctx.run("pytest {options} {args}".format(options=options, args=args))
+
+
+@task(help={
+    "args": "Command line args for behave",
+    "format": "Formatter to use (progress, pretty, ...)",
+})
+def behave(ctx, args="", format="", options=""):
+    """Run behave tests.
+
+    Empty args/format/options fall back to ctx.behave_test.* configuration.
+    Args are grouped by scope prefix and run as separate behave invocations.
+    NOTE: parameter "format" shadows the builtin; kept for task CLI compatibility.
+    """
+    format = format or ctx.behave_test.format
+    options = options or ctx.behave_test.options
+    args = args or ctx.behave_test.args
+    # -- Prefer a local "bin/behave" script if present, else "python -m behave".
+    if os.path.exists("bin/behave"):
+        behave_cmd = "{python} bin/behave".format(python=sys.executable)
+    else:
+        behave_cmd = "{python} -m behave".format(python=sys.executable)
+
+    for group_args in grouped_by_prefix(args, ctx.behave_test.scopes):
+        ctx.run("{behave} -f {format} {options} {args}".format(
+            behave=behave_cmd, format=format, options=options, args=group_args))
+
+
+@task(help={
+    "args":     "Tests to run (empty: all)",
+    "report":   "Coverage report format to use (report, html, xml)",
+})
+def coverage(ctx, args="", report="report", append=False):
+    """Determine test coverage (run pytest, behave)"""
+    append = append or ctx.coverage.append
+    report_formats = ctx.coverage.report_formats or []
+    # -- Ensure the requested report format runs first.
+    if report not in report_formats:
+        report_formats.insert(0, report)
+    opts = []
+    if append:
+        opts.append("--append")
+
+    pytest_args = select_by_prefix(args, ctx.pytest.scopes)
+    behave_args = select_by_prefix(args, ctx.behave_test.scopes)
+    pytest_should_run = not args or (args and pytest_args)
+    # NOTE(review): operator precedence makes this read as
+    #   not args or ((args and behave_args) and USE_BEHAVE)
+    # so with empty args it is True even when USE_BEHAVE is False; the
+    # "and USE_BEHAVE" guard below makes the net effect the same — confirm.
+    behave_should_run = not args or (args and behave_args) and USE_BEHAVE
+    if not args:
+        behave_args = ctx.behave_test.args or "features"
+    if isinstance(pytest_args, list):
+        pytest_args = " ".join(pytest_args)
+    if isinstance(behave_args, list):
+        behave_args = " ".join(behave_args)
+
+    # -- RUN TESTS WITH COVERAGE:
+    if pytest_should_run:
+        ctx.run("coverage run {options} -m pytest {args}".format(
+            args=pytest_args, options=" ".join(opts)))
+    if behave_should_run and USE_BEHAVE:
+        behave_options = ctx.behave_test.coverage_options or ""
+        # -- Enable subprocess coverage for behave runs via environment.
+        os.environ["COVERAGE_PROCESS_START"] = os.path.abspath(".coveragerc")
+        behave(ctx, args=behave_args, options=behave_options)
+        del os.environ["COVERAGE_PROCESS_START"]
+
+    # -- POST-PROCESSING:
+    ctx.run("coverage combine")
+    for report_format in report_formats:
+        ctx.run("coverage {report_format}".format(report_format=report_format))
+
+
+# ---------------------------------------------------------------------------
+# UTILITIES:
+# ---------------------------------------------------------------------------
+def select_prefix_for(arg, prefixes):
+    """Return the first prefix that arg starts with.
+
+    Falls back to the directory part of arg if no prefix matches
+    (which may be an empty string for bare filenames).
+    """
+    for prefix in prefixes:
+        if arg.startswith(prefix):
+            return prefix
+    return os.path.dirname(arg)
+
+
+def select_by_prefix(args, prefixes):
+    """Select whitespace-separated args whose scope (prefix) is non-empty.
+
+    :param args: Space-separated test-path arguments (no option flags).
+    :param prefixes: Scope prefixes to match against (e.g. ["tests"]).
+    :return: Space-joined string of the selected args (may be empty).
+    """
+    selected = []
+    for arg in args.strip().split():
+        assert not arg.startswith("-"), "REQUIRE: arg, not options"
+        scope = select_prefix_for(arg, prefixes)
+        if scope:
+            selected.append(arg)
+    return " ".join(selected)
+
+
+def grouped_by_prefix(args, prefixes):
+    """Group behave args by (directory) scope into multiple test-runs.
+
+    Generator: yields one space-joined string per run of consecutive args
+    that share the same scope prefix (order of args is preserved).
+    """
+    group_args = []
+    current_scope = None
+    for arg in args.strip().split():
+        assert not arg.startswith("-"), "REQUIRE: arg, not options"
+        scope = select_prefix_for(arg, prefixes)
+        if scope != current_scope:
+            if group_args:
+                # -- DETECTED GROUP-END:
+                yield " ".join(group_args)
+                group_args = []
+            current_scope = scope
+        group_args.append(arg)
+    # -- Flush the trailing group, if any.
+    if group_args:
+        yield " ".join(group_args)
+
+
+# ---------------------------------------------------------------------------
+# TASK MANAGEMENT / CONFIGURATION
+# ---------------------------------------------------------------------------
+# -- Test task collection; "test.all" is the default task.
+namespace = Collection(clean, unittest, pytest, coverage)
+namespace.add_task(test_all, default=True)
+if USE_BEHAVE:
+    namespace.add_task(behave)
+
+namespace.configure({
+    "test": {
+        "clean": {
+            "directories": [
+                ".cache", "assets",                         # -- TEST RUNS
+                # -- BEHAVE-SPECIFIC:
+                "__WORKDIR__", "reports", "test_results",
+            ],
+            "files": [
+                ".coverage", ".coverage.*",
+                # -- BEHAVE-SPECIFIC:
+                "report.html",
+                "rerun*.txt", "rerun*.featureset", "testrun*.json",
+            ],
+        },
+    },
+    "pytest": {
+        "scopes":   ["tests"],
+        "args":   "",
+        "options": "",  # -- NOTE:  Override in configfile "invoke.yaml"
+    },
+    # "behave_test": behave.namespace._configuration["behave_test"],
+    "behave_test": {
+        "scopes":   ["features"],
+        "args":     "features",
+        "format":   "progress",
+        "options":  "",  # -- NOTE:  Override in configfile "invoke.yaml"
+        "coverage_options": "",
+    },
+    "coverage": {
+        "append":   False,
+        "report_formats": ["report", "html"],
+    },
+})
+
+# -- ADD CLEANUP TASK:
+cleanup_tasks.add_task(clean, "clean_test")
+cleanup_tasks.configure(namespace.configuration())
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/__init__.py
diff --git a/tests/parse_type_test.py b/tests/parse_type_test.py
new file mode 100755
index 0000000..e254279
--- /dev/null
+++ b/tests/parse_type_test.py
@@ -0,0 +1,138 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+from parse_type import TypeBuilder
+from enum import Enum
+try:
+    import unittest2 as unittest
+except ImportError:
+    import unittest
+
+
+# -----------------------------------------------------------------------------
+# TEST SUPPORT FOR: TypeBuilder Tests
+# -----------------------------------------------------------------------------
+# -- PROOF-OF-CONCEPT DATATYPE:
+def parse_number(text):
+    """Type converter: parse a non-negative integer (pattern: digits only)."""
+    return int(text)
+parse_number.pattern = r"\d+"   # Provide better regexp pattern than default.
+parse_number.name = "Number"    # For testing only.
+
+# -- ENUM DATATYPE: maps several word spellings onto True/False.
+parse_yesno = TypeBuilder.make_enum({
+    "yes":  True,   "no":  False,
+    "on":   True,   "off": False,
+    "true": True,   "false": False,
+})
+parse_yesno.name = "YesNo"      # For testing only.
+
+# -- ENUM CLASS: enum34/stdlib Enum used as converter input.
+class Color(Enum):
+    red = 1
+    green = 2
+    blue = 3
+
+parse_color = TypeBuilder.make_enum(Color)
+parse_color.name = "Color"
+
+# -- CHOICE DATATYPE: one value out of a fixed list of strings.
+parse_person_choice = TypeBuilder.make_choice(["Alice", "Bob", "Charly"])
+parse_person_choice.name = "PersonChoice"      # For testing only.
+
+
+# -----------------------------------------------------------------------------
+# ABSTRACT TEST CASE:
+# -----------------------------------------------------------------------------
+class TestCase(unittest.TestCase):
+    """Base test case that backfills assertIsNone/assertIsNotNone
+    for old Python/unittest versions that lack them."""
+
+    # -- PYTHON VERSION BACKWARD-COMPATIBILITY:
+    if not hasattr(unittest.TestCase, "assertIsNone"):
+        def assertIsNone(self, obj, msg=None):
+            self.assert_(obj is None, msg)
+
+        def assertIsNotNone(self, obj, msg=None):
+            self.assert_(obj is not None, msg)
+
+
+class ParseTypeTestCase(TestCase):
+    """
+    Common test case base class for :mod:`parse_type` tests.
+    """
+
+    def assert_match(self, parser, text, param_name, expected):
+        """
+        Check that a parser can parse the provided text and extracts the
+        expected value for a parameter.
+
+        :param parser: Parser to use
+        :param text:   Text to parse
+        :param param_name: Name of parameter
+        :param expected:   Expected value of parameter.
+        :raise: AssertionError on failures.
+        """
+        result = parser.parse(text)
+        self.assertIsNotNone(result)
+        self.assertEqual(result[param_name], expected)
+
+    def assert_mismatch(self, parser, text, param_name=None):
+        """
+        Check that a parser cannot extract the parameter from the provided text.
+        A parse mismatch has occurred.
+
+        :param parser: Parser to use
+        :param text:   Text to parse
+        :param param_name: Name of parameter
+        :raise: AssertionError on failures.
+        """
+        result = parser.parse(text)
+        self.assertIsNone(result)
+
+    def ensure_can_parse_all_enum_values(self, parser, type_converter,
+                                         schema, name):
+        # -- ENSURE: Known enum values are correctly extracted.
+        # schema is a %-format template, e.g. "Answer: %s".
+        for value_name, value in type_converter.mappings.items():
+            text = schema % value_name
+            self.assert_match(parser, text, name,  value)
+
+    def ensure_can_parse_all_choices(self, parser, type_converter, schema, name):
+        # -- ENSURE: Every choice value parses; apply optional transform.
+        transform = getattr(type_converter, "transform", None)
+        for choice_value in type_converter.choices:
+            text = schema % choice_value
+            expected_value = choice_value
+            if transform:
+                assert callable(transform)
+                expected_value = transform(choice_value)
+            self.assert_match(parser, text, name,  expected_value)
+
+    def ensure_can_parse_all_choices2(self, parser, type_converter, schema, name):
+        # -- ENSURE: Every choice2 value parses as (index, value) pair.
+        transform = getattr(type_converter, "transform", None)
+        for index, choice_value in enumerate(type_converter.choices):
+            text = schema % choice_value
+            if transform:
+                assert callable(transform)
+                expected_value = (index, transform(choice_value))
+            else:
+                expected_value = (index, choice_value)
+            self.assert_match(parser, text, name, expected_value)
+
+
+
+# Copyright (c) 2012-2013 by Jens Engel (https://github.com/jenisys/parse_type)
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+#  The above copyright notice and this permission notice shall be included in
+#  all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
diff --git a/tests/test_builder.py b/tests/test_builder.py
new file mode 100755
index 0000000..bf6582a
--- /dev/null
+++ b/tests/test_builder.py
@@ -0,0 +1,551 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+Test suite  for parse_type.py
+
+REQUIRES: parse >= 1.8.4 ('pattern' attribute support)
+"""
+
+from __future__ import absolute_import
+import re
+import unittest
+import parse
+from .parse_type_test import ParseTypeTestCase
+from .parse_type_test \
+    import parse_number, parse_yesno, parse_person_choice, parse_color, Color
+from parse_type import TypeBuilder, build_type_dict
+from enum import Enum
+
+
+# -----------------------------------------------------------------------------
+# TEST CASE: TestTypeBuilder4Enum
+# -----------------------------------------------------------------------------
+class TestTypeBuilder4Enum(ParseTypeTestCase):
+    """Tests for TypeBuilder.make_enum() with dict and Enum-class inputs."""
+
+    TYPE_CONVERTERS = [ parse_yesno ]
+
+    def test_parse_enum_yesno(self):
+        extra_types = build_type_dict([ parse_yesno ])
+        schema = "Answer: {answer:YesNo}"
+        parser = parse.Parser(schema, extra_types)
+
+        # -- PERFORM TESTS:
+        self.ensure_can_parse_all_enum_values(parser,
+                parse_yesno, "Answer: %s", "answer")
+
+        # -- VALID:
+        self.assert_match(parser, "Answer: yes", "answer", True)
+        self.assert_match(parser, "Answer: no",  "answer", False)
+
+        # -- IGNORE-CASE: In parsing, calls type converter function !!!
+        self.assert_match(parser, "Answer: YES", "answer", True)
+
+        # -- PARSE MISMATCH:
+        self.assert_mismatch(parser, "Answer: __YES__", "answer")
+        self.assert_mismatch(parser, "Answer: yes ",    "answer")
+        self.assert_mismatch(parser, "Answer: yes ZZZ", "answer")
+
+    def test_make_enum_with_dict(self):
+        parse_nword = TypeBuilder.make_enum({"one": 1, "two": 2, "three": 3})
+        parse_nword.name = "NumberAsWord"
+
+        extra_types = build_type_dict([ parse_nword ])
+        schema = "Answer: {number:NumberAsWord}"
+        parser = parse.Parser(schema, extra_types)
+
+        # -- PERFORM TESTS:
+        self.ensure_can_parse_all_enum_values(parser,
+            parse_nword, "Answer: %s", "number")
+
+        # -- VALID:
+        self.assert_match(parser, "Answer: one", "number", 1)
+        self.assert_match(parser, "Answer: two", "number", 2)
+
+        # -- IGNORE-CASE: In parsing, calls type converter function !!!
+        self.assert_match(parser, "Answer: THREE", "number", 3)
+
+        # -- PARSE MISMATCH:
+        self.assert_mismatch(parser, "Answer: __one__", "number")
+        self.assert_mismatch(parser, "Answer: one ",    "number")
+        self.assert_mismatch(parser, "Answer: one_",    "number")
+        self.assert_mismatch(parser, "Answer: one ZZZ", "number")
+
+    def test_make_enum_with_enum_class(self):
+        """
+        Use :meth:`parse_type.TypeBuilder.make_enum()` with enum34 classes.
+        """
+        class Color(Enum):
+            red = 1
+            green = 2
+            blue = 3
+
+        parse_color = TypeBuilder.make_enum(Color)
+        parse_color.name = "Color"
+        schema = "Answer: {color:Color}"
+        parser = parse.Parser(schema, dict(Color=parse_color))
+
+        # -- PERFORM TESTS:
+        self.ensure_can_parse_all_enum_values(parser,
+                parse_color, "Answer: %s", "color")
+
+        # -- VALID:
+        self.assert_match(parser, "Answer: red",   "color", Color.red)
+        self.assert_match(parser, "Answer: green", "color", Color.green)
+        self.assert_match(parser, "Answer: blue",  "color", Color.blue)
+
+        # -- IGNORE-CASE: In parsing, calls type converter function !!!
+        self.assert_match(parser, "Answer: RED", "color", Color.red)
+
+        # -- PARSE MISMATCH:
+        self.assert_mismatch(parser, "Answer: __RED__", "color")
+        self.assert_mismatch(parser, "Answer: red ",    "color")
+        self.assert_mismatch(parser, "Answer: redx",    "color")
+        self.assert_mismatch(parser, "Answer: redx ZZZ", "color")
+
+
+# -----------------------------------------------------------------------------
+# TEST CASE: TestTypeBuilder4Choice
+# -----------------------------------------------------------------------------
+class TestTypeBuilder4Choice(ParseTypeTestCase):
+    """Tests for TypeBuilder.make_choice()/make_choice2():
+    selection of one value from a fixed list, with optional
+    transform and strict case-sensitivity modes."""
+
+    def test_parse_choice_persons(self):
+        extra_types = build_type_dict([ parse_person_choice ])
+        schema = "Answer: {answer:PersonChoice}"
+        parser = parse.Parser(schema, extra_types)
+
+        # -- PERFORM TESTS:
+        self.assert_match(parser, "Answer: Alice", "answer", "Alice")
+        self.assert_match(parser, "Answer: Bob",   "answer", "Bob")
+        self.ensure_can_parse_all_choices(parser,
+                    parse_person_choice, "Answer: %s", "answer")
+
+        # -- IGNORE-CASE: In parsing, calls type converter function !!!
+        # SKIP-WART: self.assert_match(parser, "Answer: BOB", "answer", "BOB")
+
+        # -- PARSE MISMATCH:
+        self.assert_mismatch(parser, "Answer: __Alice__", "answer")
+        self.assert_mismatch(parser, "Answer: Alice ",    "answer")
+        self.assert_mismatch(parser, "Answer: Alice ZZZ", "answer")
+
+    def test_make_choice(self):
+        parse_choice = TypeBuilder.make_choice(["one", "two", "three"])
+        parse_choice.name = "NumberWordChoice"
+        extra_types = build_type_dict([ parse_choice ])
+        schema = "Answer: {answer:NumberWordChoice}"
+        parser = parse.Parser(schema, extra_types)
+
+        # -- PERFORM TESTS:
+        self.assert_match(parser, "Answer: one", "answer", "one")
+        self.assert_match(parser, "Answer: two", "answer", "two")
+        self.ensure_can_parse_all_choices(parser,
+                    parse_choice, "Answer: %s", "answer")
+
+        # -- PARSE MISMATCH:
+        self.assert_mismatch(parser, "Answer: __one__", "answer")
+        self.assert_mismatch(parser, "Answer: one ",    "answer")
+        self.assert_mismatch(parser, "Answer: one ZZZ", "answer")
+
+    def test_make_choice__anycase_accepted_case_sensitity(self):
+        # -- NOTE: strict=False => Disable errors due to case-mismatch.
+        parse_choice = TypeBuilder.make_choice(["one", "two", "three"],
+                                               strict=False)
+        schema = "Answer: {answer:NumberWordChoice}"
+        parser = parse.Parser(schema, dict(NumberWordChoice=parse_choice))
+
+        # -- PERFORM TESTS:
+        # NOTE: Parser uses re.IGNORECASE flag => Any case accepted.
+        self.assert_match(parser, "Answer: one",   "answer", "one")
+        self.assert_match(parser, "Answer: TWO",   "answer", "TWO")
+        self.assert_match(parser, "Answer: Three", "answer", "Three")
+
+    def test_make_choice__samecase_match_or_error(self):
+        # -- NOTE: strict=True => Enable errors due to case-mismatch.
+        parse_choice = TypeBuilder.make_choice(["One", "TWO", "three"],
+                                               strict=True)
+        schema = "Answer: {answer:NumberWordChoice}"
+        parser = parse.Parser(schema, dict(NumberWordChoice=parse_choice))
+
+        # -- PERFORM TESTS: Case matches.
+        # NOTE: Parser uses re.IGNORECASE flag => Any case accepted.
+        self.assert_match(parser, "Answer: One",   "answer", "One")
+        self.assert_match(parser, "Answer: TWO",   "answer", "TWO")
+        self.assert_match(parser, "Answer: three", "answer", "three")
+
+        # -- PERFORM TESTS: EXACT-CASE MISMATCH
+        case_mismatch_input_data = ["one", "ONE", "Two", "two", "Three" ]
+        for input_value in case_mismatch_input_data:
+            input_text = "Answer: %s" % input_value
+            with self.assertRaises(ValueError):
+                parser.parse(input_text)
+
+    def test_make_choice__anycase_accepted_lowercase_enforced(self):
+        # -- NOTE: strict=True => Enable errors due to case-mismatch.
+        parse_choice = TypeBuilder.make_choice(["one", "two", "three"],
+                            transform=lambda x: x.lower(), strict=True)
+        schema = "Answer: {answer:NumberWordChoice}"
+        parser = parse.Parser(schema, dict(NumberWordChoice=parse_choice))
+
+        # -- PERFORM TESTS:
+        # NOTE: Parser uses re.IGNORECASE flag
+        # => Any case accepted, but result is in lower case.
+        self.assert_match(parser, "Answer: one",   "answer", "one")
+        self.assert_match(parser, "Answer: TWO",   "answer", "two")
+        self.assert_match(parser, "Answer: Three", "answer", "three")
+
+    def test_make_choice__with_transform(self):
+        transform = lambda x: x.upper()
+        parse_choice = TypeBuilder.make_choice(["ONE", "two", "Three"],
+                                               transform)
+        self.assertSequenceEqual(parse_choice.choices, ["ONE", "TWO", "THREE"])
+        schema = "Answer: {answer:NumberWordChoice}"
+        parser = parse.Parser(schema, dict(NumberWordChoice=parse_choice))
+
+        # -- PERFORM TESTS:
+        self.assert_match(parser, "Answer: one", "answer", "ONE")
+        self.assert_match(parser, "Answer: two", "answer", "TWO")
+        self.ensure_can_parse_all_choices(parser,
+                    parse_choice, "Answer: %s", "answer")
+
+        # -- PARSE MISMATCH:
+        self.assert_mismatch(parser, "Answer: __one__", "answer")
+        self.assert_mismatch(parser, "Answer: one ",    "answer")
+        self.assert_mismatch(parser, "Answer: one ZZZ", "answer")
+
+    def test_make_choice2(self):
+        # -- strict=False: Disable errors due to case mismatch.
+        parse_choice2 = TypeBuilder.make_choice2(["zero", "one", "two"],
+                                                 strict=False)
+        parse_choice2.name = "NumberWordChoice2"
+        extra_types = build_type_dict([ parse_choice2 ])
+        schema = "Answer: {answer:NumberWordChoice2}"
+        parser = parse.Parser(schema, extra_types)
+
+        # -- PERFORM TESTS:
+        self.assert_match(parser, "Answer: zero", "answer", (0, "zero"))
+        self.assert_match(parser, "Answer: one",  "answer", (1, "one"))
+        self.assert_match(parser, "Answer: two",  "answer", (2, "two"))
+        self.ensure_can_parse_all_choices2(parser,
+                parse_choice2, "Answer: %s", "answer")
+
+        # -- PARSE MISMATCH:
+        self.assert_mismatch(parser, "Answer: __one__", "answer")
+        self.assert_mismatch(parser, "Answer: one ",    "answer")
+        self.assert_mismatch(parser, "Answer: one ZZZ", "answer")
+
+    def test_make_choice2__with_transform(self):
+        transform = lambda x: x.lower()
+        parse_choice2 = TypeBuilder.make_choice2(["ZERO", "one", "Two"],
+                                        transform=transform)
+        self.assertSequenceEqual(parse_choice2.choices, ["zero", "one", "two"])
+        schema = "Answer: {answer:NumberWordChoice}"
+        parser = parse.Parser(schema, dict(NumberWordChoice=parse_choice2))
+
+        # -- PERFORM TESTS:
+        # NOTE: Parser uses re.IGNORECASE => Any case is accepted.
+        self.assert_match(parser, "Answer: zERO", "answer", (0, "zero"))
+        self.assert_match(parser, "Answer: ONE", "answer",  (1, "one"))
+        self.assert_match(parser, "Answer: Two", "answer",  (2, "two"))
+
+    def test_make_choice2__samecase_match_or_error(self):
+        # -- NOTE: strict=True => Enable errors due to case-mismatch.
+        parse_choice2 = TypeBuilder.make_choice2(["Zero", "one", "TWO"],
+                                                 strict=True)
+        schema = "Answer: {answer:NumberWordChoice}"
+        parser = parse.Parser(schema, dict(NumberWordChoice=parse_choice2))
+
+        # -- PERFORM TESTS: Case matches.
+        # NOTE: Parser uses re.IGNORECASE flag => Any case accepted.
+        self.assert_match(parser, "Answer: Zero", "answer", (0, "Zero"))
+        self.assert_match(parser, "Answer: one",  "answer", (1, "one"))
+        self.assert_match(parser, "Answer: TWO",  "answer", (2, "TWO"))
+
+        # -- PERFORM TESTS: EXACT-CASE MISMATCH
+        case_mismatch_input_data = ["zero", "ZERO", "One", "ONE", "two" ]
+        for input_value in case_mismatch_input_data:
+            input_text = "Answer: %s" % input_value
+            with self.assertRaises(ValueError):
+                parser.parse(input_text)
+
+# -----------------------------------------------------------------------------
+# TEST CASE: TestTypeBuilder4Variant
+# -----------------------------------------------------------------------------
+class TestTypeBuilder4Variant(ParseTypeTestCase):
+    """Tests for TypeBuilder.make_variant(): a converter that accepts
+    any one of several alternative type converters, with compiled,
+    re_opts and strict mode variations."""
+
+    TYPE_CONVERTERS = [ parse_number, parse_yesno ]
+
+    def check_parse_variant_number_or_yesno(self, parse_variant,
+                                            with_ignorecase=True):
+        # -- Shared checks for a YesNo-or-Number variant converter.
+        schema = "Variant: {variant:YesNo_or_Number}"
+        parser = parse.Parser(schema, dict(YesNo_or_Number=parse_variant))
+
+        # -- TYPE 1: YesNo
+        self.assert_match(parser, "Variant: yes", "variant", True)
+        self.assert_match(parser, "Variant: no",  "variant", False)
+        # -- IGNORECASE problem => re_opts
+        if with_ignorecase:
+            self.assert_match(parser, "Variant: YES", "variant", True)
+
+        # -- TYPE 2: Number
+        self.assert_match(parser, "Variant: 0",  "variant",  0)
+        self.assert_match(parser, "Variant: 1",  "variant",  1)
+        self.assert_match(parser, "Variant: 12", "variant", 12)
+        self.assert_match(parser, "Variant: 42", "variant", 42)
+
+        # -- PARSE MISMATCH:
+        self.assert_mismatch(parser, "Variant: __YES__")
+        self.assert_mismatch(parser, "Variant: yes ")
+        self.assert_mismatch(parser, "Variant: yes ZZZ")
+        self.assert_mismatch(parser, "Variant: -1")
+
+        # -- PERFORM TESTS:
+        self.ensure_can_parse_all_enum_values(parser,
+                    parse_yesno, "Variant: %s", "variant")
+
+    def test_make_variant__uncompiled(self):
+        type_converters = [parse_yesno, parse_number]
+        parse_variant1 = TypeBuilder.make_variant(type_converters)
+        self.check_parse_variant_number_or_yesno(parse_variant1)
+
+    def test_make_variant__compiled(self):
+        # -- REVERSED ORDER VARIANT:
+        type_converters = [parse_number, parse_yesno]
+        parse_variant2 = TypeBuilder.make_variant(type_converters,
+                                                  compiled=True)
+        self.check_parse_variant_number_or_yesno(parse_variant2)
+
+
+    def test_make_variant__with_re_opts_0(self):
+        # -- SKIP: IGNORECASE checks which would raise an error in strict mode.
+        type_converters = [parse_number, parse_yesno]
+        parse_variant3 = TypeBuilder.make_variant(type_converters, re_opts=0)
+        self.check_parse_variant_number_or_yesno(parse_variant3,
+                                                 with_ignorecase=False)
+
+    def test_make_variant__with_re_opts_IGNORECASE(self):
+        type_converters = [parse_number, parse_yesno]
+        parse_variant3 = TypeBuilder.make_variant(type_converters,
+                                                  re_opts=re.IGNORECASE)
+        self.check_parse_variant_number_or_yesno(parse_variant3)
+
+    def test_make_variant__with_strict(self):
+        # -- SKIP: IGNORECASE checks which would raise an error in strict mode.
+        type_converters = [parse_number, parse_yesno]
+        parse_variant = TypeBuilder.make_variant(type_converters, strict=True)
+        self.check_parse_variant_number_or_yesno(parse_variant,
+                                                 with_ignorecase=False)
+
+    def test_make_variant__with_strict_raises_error_on_case_mismatch(self):
+        # -- NEEDS:
+        #  * re_opts=0 (IGNORECASE disabled)
+        #  * strict=True, allow that an error is raised
+        type_converters = [parse_number, parse_yesno]
+        parse_variant = TypeBuilder.make_variant(type_converters,
+                                                 strict=True, re_opts=0)
+        schema = "Variant: {variant:YesNo_or_Number}"
+        parser = parse.Parser(schema, dict(YesNo_or_Number=parse_variant))
+        self.assertRaises(AssertionError,  parser.parse, "Variant: YES")
+
+    def test_make_variant__without_strict_may_return_none_on_case_mismatch(self):
+        # -- NEEDS:
+        #  * re_opts=0 (IGNORECASE disabled)
+        #  * strict=False, otherwise an error is raised
+        type_converters = [parse_number, parse_yesno]
+        parse_variant = TypeBuilder.make_variant(type_converters, re_opts=0,
+                                                 strict=False)
+        schema = "Variant: {variant:YesNo_or_Number}"
+        parser = parse.Parser(schema, dict(YesNo_or_Number=parse_variant))
+        result = parser.parse("Variant: No")
+        self.assertNotEqual(result, None)
+        self.assertEqual(result["variant"], None)
+
+    def test_make_variant__with_strict_and_compiled_raises_error_on_case_mismatch(self):
+        # XXX re_opts=0 seems to work differently.
+        # -- NEEDS:
+        #  * re_opts=0 (IGNORECASE disabled)
+        #  * strict=True, allow that an error is raised
+        type_converters = [parse_number, parse_yesno]
+        # -- ENSURE: coverage for cornercase.
+        parse_number.matcher = re.compile(parse_number.pattern)
+
+        parse_variant = TypeBuilder.make_variant(type_converters,
+                                        compiled=True, re_opts=0, strict=True)
+        schema = "Variant: {variant:YesNo_or_Number}"
+        parser = parse.Parser(schema, dict(YesNo_or_Number=parse_variant))
+        # XXX self.assertRaises(AssertionError,  parser.parse, "Variant: YES")
+        result = parser.parse("Variant: Yes")
+        self.assertNotEqual(result, None)
+        self.assertEqual(result["variant"], True)
+
+    def test_make_variant__without_strict_and_compiled_may_return_none_on_case_mismatch(self):
+        # XXX re_opts=0 seems to work differently.
+        # -- NEEDS:
+        #  * re_opts=0 (IGNORECASE disabled)
+        #  * strict=False, otherwise an error is raised
+        # NOTE(review): the call below passes strict=True although the test
+        # name and the comment above say strict=False — confirm intent.
+        type_converters = [parse_number, parse_yesno]
+        parse_variant = TypeBuilder.make_variant(type_converters,
+                                        compiled=True, re_opts=0, strict=True)
+        schema = "Variant: {variant:YesNo_or_Number}"
+        parser = parse.Parser(schema, dict(YesNo_or_Number=parse_variant))
+        result = parser.parse("Variant: NO")
+        self.assertNotEqual(result, None)
+        self.assertEqual(result["variant"], False)
+
+
+    def test_make_variant__with_color_or_person(self):
+        type_converters = [parse_color, parse_person_choice]
+        parse_variant2 = TypeBuilder.make_variant(type_converters)
+        schema = "Variant2: {variant:Color_or_Person}"
+        parser = parse.Parser(schema, dict(Color_or_Person=parse_variant2))
+
+        # -- TYPE 1: Color
+        self.assert_match(parser, "Variant2: red",  "variant", Color.red)
+        self.assert_match(parser, "Variant2: blue", "variant", Color.blue)
+
+        # -- TYPE 2: Person
+        self.assert_match(parser, "Variant2: Alice",  "variant", "Alice")
+        self.assert_match(parser, "Variant2: Bob",    "variant", "Bob")
+        self.assert_match(parser, "Variant2: Charly", "variant", "Charly")
+
+        # -- PARSE MISMATCH:
+        self.assert_mismatch(parser, "Variant2: __Alice__")
+        self.assert_mismatch(parser, "Variant2: Alice ")
+        self.assert_mismatch(parser, "Variant2: Alice2")
+        self.assert_mismatch(parser, "Variant2: red2")
+
+        # -- PERFORM TESTS:
+        self.ensure_can_parse_all_enum_values(parser,
+                    parse_color, "Variant2: %s", "variant")
+
+        self.ensure_can_parse_all_choices(parser,
+                    parse_person_choice, "Variant2: %s", "variant")
+
+
+class TestParserWithManyTypedFields(ParseTypeTestCase):
+
+    parse_variant1 = TypeBuilder.make_variant([parse_number, parse_yesno])
+    parse_variant1.name = "Number_or_YesNo"
+    parse_variant2 = TypeBuilder.make_variant([parse_color, parse_person_choice])
+    parse_variant2.name = "Color_or_PersonChoice"
+    TYPE_CONVERTERS = [
+        parse_number,
+        parse_yesno,
+        parse_color,
+        parse_person_choice,
+        parse_variant1,
+        parse_variant2,
+    ]
+
+    def test_parse_with_many_named_fields(self):
+        type_dict = build_type_dict(self.TYPE_CONVERTERS)
+        schema = """\
+Number:   {number:Number}
+YesNo:    {answer:YesNo}
+Color:    {color:Color}
+Person:   {person:PersonChoice}
+Variant1: {variant1:Number_or_YesNo}
+Variant2: {variant2:Color_or_PersonChoice}
+"""
+        parser = parse.Parser(schema, type_dict)
+
+        text = """\
+Number:   12
+YesNo:    yes
+Color:    red
+Person:   Alice
+Variant1: 42
+Variant2: Bob
+"""
+        expected = dict(
+            number=12,
+            answer=True,
+            color=Color.red,
+            person="Alice",
+            variant1=42,
+            variant2="Bob"
+        )
+
+        result = parser.parse(text)
+        self.assertIsNotNone(result)
+        self.assertEqual(result.named, expected)
+
+    def test_parse_with_many_unnamed_fields(self):
+        type_dict = build_type_dict(self.TYPE_CONVERTERS)
+        schema = """\
+Number:   {:Number}
+YesNo:    {:YesNo}
+Color:    {:Color}
+Person:   {:PersonChoice}
+"""
+        # -- OMIT: XFAIL, due to group_index delta counting => Parser problem.
+        # Variant2: {:Color_or_PersonChoice}
+        # Variant1: {:Number_or_YesNo}
+        parser = parse.Parser(schema, type_dict)
+
+        text = """\
+Number:   12
+YesNo:    yes
+Color:    red
+Person:   Alice
+"""
+        # SKIP: Variant2: Bob
+        # SKIP: Variant1: 42
+        expected = [ 12, True, Color.red, "Alice", ] # -- SKIP: "Bob", 42 ]
+
+        result = parser.parse(text)
+        self.assertIsNotNone(result)
+        self.assertEqual(result.fixed, tuple(expected))
+
+    def test_parse_with_many_unnamed_fields_with_variants(self):
+        type_dict = build_type_dict(self.TYPE_CONVERTERS)
+        schema = """\
+Number:   {:Number}
+YesNo:    {:YesNo}
+Color:    {:Color}
+Person:   {:PersonChoice}
+Variant2: {:Color_or_PersonChoice}
+Variant1: {:Number_or_YesNo}
+"""
+        # -- OMIT: XFAIL, due to group_index delta counting => Parser problem.
+        parser = parse.Parser(schema, type_dict)
+
+        text = """\
+Number:   12
+YesNo:    yes
+Color:    red
+Person:   Alice
+Variant2: Bob
+Variant1: 42
+"""
+        expected = [ 12, True, Color.red, "Alice", "Bob", 42 ]
+
+        result = parser.parse(text)
+        self.assertIsNotNone(result)
+        self.assertEqual(result.fixed, tuple(expected))
+
+
+# -----------------------------------------------------------------------------
+# MAIN:
+# -----------------------------------------------------------------------------
+if __name__ == '__main__':
+    unittest.main()
+
+
+# Copyright (c) 2012-2013 by Jens Engel (https://github.com/jenisys/parse_type)
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+#  The above copyright notice and this permission notice shall be included in
+#  all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
diff --git a/tests/test_cardinality.py b/tests/test_cardinality.py
new file mode 100755
index 0000000..621e2e6
--- /dev/null
+++ b/tests/test_cardinality.py
@@ -0,0 +1,556 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+Test suite to test the :mod:`parse_type.cardinality` module.
+"""
+
+from __future__ import absolute_import
+from .parse_type_test import ParseTypeTestCase, parse_number
+from parse_type import Cardinality, TypeBuilder, build_type_dict
+from parse import Parser
+import parse
+import unittest
+
+# -----------------------------------------------------------------------------
+# TEST CASE: TestCardinality
+# -----------------------------------------------------------------------------
+class TestCardinality(ParseTypeTestCase):
+
+    def test_enum_basics(self):
+        assert Cardinality.optional is Cardinality.zero_or_one
+        assert Cardinality.many0 is Cardinality.zero_or_more
+        assert Cardinality.many  is Cardinality.one_or_more
+
+    def check_pattern_for_cardinality_one(self, pattern, new_pattern):
+        expected_pattern = Cardinality.one.make_pattern(pattern)
+        self.assertEqual(pattern, new_pattern)
+        self.assertEqual(new_pattern, expected_pattern)
+
+    def check_pattern_for_cardinality_zero_or_one(self, pattern, new_pattern):
+        expected_pattern = Cardinality.zero_or_one.schema % pattern
+        self.assertNotEqual(pattern, new_pattern)
+        self.assertEqual(new_pattern, expected_pattern)
+
+    def check_pattern_for_cardinality_zero_or_more(self, pattern, new_pattern):
+        expected_pattern = Cardinality.zero_or_more.make_pattern(pattern)
+        self.assertNotEqual(pattern, new_pattern)
+        self.assertEqual(new_pattern, expected_pattern)
+
+    def check_pattern_for_cardinality_one_or_more(self, pattern, new_pattern):
+        expected_pattern = Cardinality.one_or_more.make_pattern(pattern)
+        self.assertNotEqual(pattern, new_pattern)
+        self.assertEqual(new_pattern, expected_pattern)
+
+    def check_pattern_for_cardinality_optional(self, pattern, new_pattern):
+        expected = Cardinality.optional.make_pattern(pattern)
+        self.assertEqual(new_pattern, expected)
+        self.check_pattern_for_cardinality_zero_or_one(pattern, new_pattern)
+
+    def check_pattern_for_cardinality_many0(self, pattern, new_pattern):
+        expected = Cardinality.many0.make_pattern(pattern)
+        self.assertEqual(new_pattern, expected)
+        self.check_pattern_for_cardinality_zero_or_more(pattern, new_pattern)
+
+    def check_pattern_for_cardinality_many(self, pattern, new_pattern):
+        expected = Cardinality.many.make_pattern(pattern)
+        self.assertEqual(new_pattern, expected)
+        self.check_pattern_for_cardinality_one_or_more(pattern, new_pattern)
+
+    def test_make_pattern(self):
+        data = [
+            (Cardinality.one, r"\d+", r"\d+"),
+            (Cardinality.one, r"\w+", None),
+            (Cardinality.zero_or_one, r"\w+", None),
+            (Cardinality.one_or_more, r"\w+", None),
+            (Cardinality.optional, "XXX", Cardinality.zero_or_one.make_pattern("XXX")),
+            (Cardinality.many0, "XXX", Cardinality.zero_or_more.make_pattern("XXX")),
+            (Cardinality.many,  "XXX", Cardinality.one_or_more.make_pattern("XXX")),
+        ]
+        for cardinality, pattern, expected_pattern in data:
+            if expected_pattern is None:
+                expected_pattern = cardinality.make_pattern(pattern)
+            new_pattern = cardinality.make_pattern(pattern)
+            self.assertEqual(new_pattern, expected_pattern)
+
+            name = cardinality.name
+            checker = getattr(self, "check_pattern_for_cardinality_%s" % name)
+            checker(pattern, new_pattern)
+
+    def test_make_pattern_for_zero_or_one(self):
+        patterns  = [r"\d",    r"\d+",    r"\w+",    r"XXX" ]
+        expecteds = [r"(\d)?", r"(\d+)?", r"(\w+)?", r"(XXX)?" ]
+        for pattern, expected in zip(patterns, expecteds):
+            new_pattern = Cardinality.zero_or_one.make_pattern(pattern)
+            self.assertEqual(new_pattern, expected)
+            self.check_pattern_for_cardinality_zero_or_one(pattern, new_pattern)
+
+    def test_make_pattern_for_zero_or_more(self):
+        pattern  = "XXX"
+        expected = r"(XXX)?(\s*,\s*(XXX))*"
+        new_pattern = Cardinality.zero_or_more.make_pattern(pattern)
+        self.assertEqual(new_pattern, expected)
+        self.check_pattern_for_cardinality_zero_or_more(pattern, new_pattern)
+
+    def test_make_pattern_for_one_or_more(self):
+        pattern  = "XXX"
+        expected = r"(XXX)(\s*,\s*(XXX))*"
+        new_pattern = Cardinality.one_or_more.make_pattern(pattern)
+        self.assertEqual(new_pattern, expected)
+        self.check_pattern_for_cardinality_one_or_more(pattern, new_pattern)
+
+    def test_is_many(self):
+        is_many_true_valueset = set(
+            [Cardinality.zero_or_more, Cardinality.one_or_more])
+
+        for cardinality in Cardinality:
+            expected = cardinality in is_many_true_valueset
+            self.assertEqual(cardinality.is_many(), expected)
+
+
+# -----------------------------------------------------------------------------
+# TEST CASE: CardinalityTypeBuilderTest
+# -----------------------------------------------------------------------------
+class CardinalityTypeBuilderTest(ParseTypeTestCase):
+
+    def check_parse_number_with_zero_or_one(self, parse_candidate,
+                                            type_name="OptionalNumber"):
+        schema = "Optional: {number:%s}" % type_name
+        type_dict = {
+            "Number":  parse_number,
+            type_name: parse_candidate,
+        }
+        parser = parse.Parser(schema, type_dict)
+
+        # -- PERFORM TESTS:
+        self.assert_match(parser, "Optional: ",   "number", None)
+        self.assert_match(parser, "Optional: 1",  "number", 1)
+        self.assert_match(parser, "Optional: 42", "number", 42)
+
+        # -- PARSE MISMATCH:
+        self.assert_mismatch(parser, "Optional: x",   "number")  # Not a Number.
+        self.assert_mismatch(parser, "Optional: -1",  "number")  # Negative.
+        self.assert_mismatch(parser, "Optional: a, b", "number") # List of ...
+
+    def check_parse_number_with_optional(self, parse_candidate,
+                                         type_name="OptionalNumber"):
+        self.check_parse_number_with_zero_or_one(parse_candidate, type_name)
+
+    def check_parse_number_with_zero_or_more(self, parse_candidate,
+                                             type_name="Numbers0"):
+        schema = "List: {numbers:%s}" % type_name
+        type_dict = {
+            type_name: parse_candidate,
+        }
+        parser = parse.Parser(schema, type_dict)
+
+        # -- PERFORM TESTS:
+        self.assert_match(parser, "List: ",        "numbers", [ ])
+        self.assert_match(parser, "List: 1",       "numbers", [ 1 ])
+        self.assert_match(parser, "List: 1, 2",    "numbers", [ 1, 2 ])
+        self.assert_match(parser, "List: 1, 2, 3", "numbers", [ 1, 2, 3 ])
+
+        # -- PARSE MISMATCH:
+        self.assert_mismatch(parser, "List: x",  "numbers")  # Not a Number.
+        self.assert_mismatch(parser, "List: -1", "numbers")  # Negative.
+        self.assert_mismatch(parser, "List: 1,", "numbers")  # Trailing sep.
+        self.assert_mismatch(parser, "List: a, b", "numbers") # List of ...
+
+    def check_parse_number_with_one_or_more(self, parse_candidate,
+                                            type_name="Numbers"):
+        schema = "List: {numbers:%s}" % type_name
+        type_dict = {
+            "Number":  parse_number,
+            type_name: parse_candidate,
+        }
+        parser = parse.Parser(schema, type_dict)
+
+        # -- PERFORM TESTS:
+        self.assert_match(parser, "List: 1",       "numbers", [ 1 ])
+        self.assert_match(parser, "List: 1, 2",    "numbers", [ 1, 2 ])
+        self.assert_match(parser, "List: 1, 2, 3", "numbers", [ 1, 2, 3 ])
+
+        # -- PARSE MISMATCH:
+        self.assert_mismatch(parser, "List: ",   "numbers")  # Zero items.
+        self.assert_mismatch(parser, "List: x",  "numbers")  # Not a Number.
+        self.assert_mismatch(parser, "List: -1", "numbers")  # Negative.
+        self.assert_mismatch(parser, "List: 1,", "numbers")  # Trailing sep.
+        self.assert_mismatch(parser, "List: a, b", "numbers") # List of ...
+
+    def check_parse_choice_with_optional(self, parse_candidate):
+        # Choice (["red", "green", "blue"])
+        schema = "Optional: {color:OptionalChoiceColor}"
+        parser = parse.Parser(schema, dict(OptionalChoiceColor=parse_candidate))
+
+        # -- PERFORM TESTS:
+        self.assert_match(parser, "Optional: ",      "color", None)
+        self.assert_match(parser, "Optional: red",   "color", "red")
+        self.assert_match(parser, "Optional: green", "color", "green")
+        self.assert_match(parser, "Optional: blue",  "color", "blue")
+
+        # -- PARSE MISMATCH:
+        self.assert_mismatch(parser, "Optional: r",    "color")  # Not a Color.
+        self.assert_mismatch(parser, "Optional: redx", "color")  # Similar.
+        self.assert_mismatch(parser, "Optional: red, blue", "color") # List of ...
+
+
+    def check_parse_number_with_many(self, parse_candidate, type_name="Numbers"):
+        self.check_parse_number_with_one_or_more(parse_candidate, type_name)
+
+    def check_parse_number_with_many0(self, parse_candidate,
+                                      type_name="Numbers0"):
+        self.check_parse_number_with_zero_or_more(parse_candidate, type_name)
+
+
+# -----------------------------------------------------------------------------
+# TEST CASE: TestTypeBuilder4Cardinality
+# -----------------------------------------------------------------------------
+class TestTypeBuilder4Cardinality(CardinalityTypeBuilderTest):
+
+    def test_with_zero_or_one_basics(self):
+        parse_opt_number = TypeBuilder.with_zero_or_one(parse_number)
+        self.assertEqual(parse_opt_number.pattern, r"(\d+)?")
+
+    def test_with_zero_or_one__number(self):
+        parse_opt_number = TypeBuilder.with_zero_or_one(parse_number)
+        self.check_parse_number_with_zero_or_one(parse_opt_number)
+
+    def test_with_optional__number(self):
+        # -- ALIAS FOR: zero_or_one
+        parse_opt_number = TypeBuilder.with_optional(parse_number)
+        self.check_parse_number_with_optional(parse_opt_number)
+
+    def test_with_optional__choice(self):
+        # -- ALIAS FOR: zero_or_one
+        parse_color = TypeBuilder.make_choice(["red", "green", "blue"])
+        parse_opt_color = TypeBuilder.with_optional(parse_color)
+        self.check_parse_choice_with_optional(parse_opt_color)
+
+    def test_with_zero_or_more_basics(self):
+        parse_numbers = TypeBuilder.with_zero_or_more(parse_number)
+        self.assertEqual(parse_numbers.pattern, r"(\d+)?(\s*,\s*(\d+))*")
+
+    def test_with_zero_or_more__number(self):
+        parse_numbers = TypeBuilder.with_zero_or_more(parse_number)
+        self.check_parse_number_with_zero_or_more(parse_numbers)
+
+    def test_with_zero_or_more__choice(self):
+        parse_color  = TypeBuilder.make_choice(["red", "green", "blue"])
+        parse_colors = TypeBuilder.with_zero_or_more(parse_color)
+        parse_colors.name = "Colors0"
+
+        extra_types = build_type_dict([ parse_colors ])
+        schema = "List: {colors:Colors0}"
+        parser = parse.Parser(schema, extra_types)
+
+        # -- PERFORM TESTS:
+        self.assert_match(parser, "List: ",           "colors", [ ])
+        self.assert_match(parser, "List: green",      "colors", [ "green" ])
+        self.assert_match(parser, "List: red, green", "colors", [ "red", "green" ])
+
+        # -- PARSE MISMATCH:
+        self.assert_mismatch(parser, "List: x",  "colors")  # Not a Color.
+        self.assert_mismatch(parser, "List: black", "colors")  # Unknown
+        self.assert_mismatch(parser, "List: red,",  "colors")  # Trailing sep.
+        self.assert_mismatch(parser, "List: a, b",  "colors")  # List of ...
+
+    def test_with_one_or_more_basics(self):
+        parse_numbers = TypeBuilder.with_one_or_more(parse_number)
+        self.assertEqual(parse_numbers.pattern, r"(\d+)(\s*,\s*(\d+))*")
+
+    def test_with_one_or_more_basics_with_other_separator(self):
+        parse_numbers2 = TypeBuilder.with_one_or_more(parse_number, listsep=';')
+        self.assertEqual(parse_numbers2.pattern, r"(\d+)(\s*;\s*(\d+))*")
+
+        parse_numbers2 = TypeBuilder.with_one_or_more(parse_number, listsep=':')
+        self.assertEqual(parse_numbers2.pattern, r"(\d+)(\s*:\s*(\d+))*")
+
+    def test_with_one_or_more(self):
+        parse_numbers = TypeBuilder.with_one_or_more(parse_number)
+        self.check_parse_number_with_one_or_more(parse_numbers)
+
+    def test_with_many(self):
+        # -- ALIAS FOR: one_or_more
+        parse_numbers = TypeBuilder.with_many(parse_number)
+        self.check_parse_number_with_many(parse_numbers)
+
+    def test_with_many0(self):
+        # -- ALIAS FOR: zero_or_more
+        parse_numbers = TypeBuilder.with_many0(parse_number)
+        self.check_parse_number_with_many0(parse_numbers)
+
+    def test_with_one_or_more_choice(self):
+        parse_color  = TypeBuilder.make_choice(["red", "green", "blue"])
+        parse_colors = TypeBuilder.with_one_or_more(parse_color)
+        parse_colors.name = "Colors"
+
+        extra_types = build_type_dict([ parse_colors ])
+        schema = "List: {colors:Colors}"
+        parser = parse.Parser(schema, extra_types)
+
+        # -- PERFORM TESTS:
+        self.assert_match(parser, "List: green",      "colors", [ "green" ])
+        self.assert_match(parser, "List: red, green", "colors", [ "red", "green" ])
+
+        # -- PARSE MISMATCH:
+        self.assert_mismatch(parser, "List: ",   "colors")  # Zero items.
+        self.assert_mismatch(parser, "List: x",  "colors")  # Not a Color.
+        self.assert_mismatch(parser, "List: black", "colors")  # Unknown
+        self.assert_mismatch(parser, "List: red,",  "colors")  # Trailing sep.
+        self.assert_mismatch(parser, "List: a, b",  "colors")  # List of ...
+
+    def test_with_one_or_more_enum(self):
+        parse_color  = TypeBuilder.make_enum({"red": 1, "green":2, "blue": 3})
+        parse_colors = TypeBuilder.with_one_or_more(parse_color)
+        parse_colors.name = "Colors"
+
+        extra_types = build_type_dict([ parse_colors ])
+        schema = "List: {colors:Colors}"
+        parser = parse.Parser(schema, extra_types)
+
+        # -- PERFORM TESTS:
+        self.assert_match(parser, "List: green",      "colors", [ 2 ])
+        self.assert_match(parser, "List: red, green", "colors", [ 1, 2 ])
+
+        # -- PARSE MISMATCH:
+        self.assert_mismatch(parser, "List: ",   "colors")  # Zero items.
+        self.assert_mismatch(parser, "List: x",  "colors")  # Not a Color.
+        self.assert_mismatch(parser, "List: black", "colors")  # Unknown
+        self.assert_mismatch(parser, "List: red,",  "colors")  # Trailing sep.
+        self.assert_mismatch(parser, "List: a, b",  "colors")  # List of ...
+
+    def test_with_one_or_more_with_other_separator(self):
+        parse_numbers2 = TypeBuilder.with_one_or_more(parse_number, listsep=';')
+        parse_numbers2.name = "Numbers2"
+
+        extra_types = build_type_dict([ parse_numbers2 ])
+        schema = "List: {numbers:Numbers2}"
+        parser = parse.Parser(schema, extra_types)
+
+        # -- PERFORM TESTS:
+        self.assert_match(parser, "List: 1",       "numbers", [ 1 ])
+        self.assert_match(parser, "List: 1; 2",    "numbers", [ 1, 2 ])
+        self.assert_match(parser, "List: 1; 2; 3", "numbers", [ 1, 2, 3 ])
+
+    def test_with_cardinality_one(self):
+        parse_number2 = TypeBuilder.with_cardinality(Cardinality.one, parse_number)
+        assert parse_number2 is parse_number
+
+    def test_with_cardinality_zero_or_one(self):
+        parse_opt_number = TypeBuilder.with_cardinality(
+                Cardinality.zero_or_one, parse_number)
+        self.check_parse_number_with_zero_or_one(parse_opt_number)
+
+    def test_with_cardinality_zero_or_more(self):
+        parse_many0_numbers = TypeBuilder.with_cardinality(
+                Cardinality.zero_or_more, parse_number)
+        self.check_parse_number_with_zero_or_more(parse_many0_numbers)
+
+    def test_with_cardinality_one_or_more(self):
+        parse_many_numbers = TypeBuilder.with_cardinality(
+                Cardinality.one_or_more, parse_number)
+        self.check_parse_number_with_one_or_more(parse_many_numbers)
+
+    def test_with_cardinality_optional(self):
+        parse_opt_number = TypeBuilder.with_cardinality(
+                Cardinality.optional, parse_number)
+        self.check_parse_number_with_optional(parse_opt_number)
+
+    def test_with_cardinality_many0(self):
+        parse_many0_numbers = TypeBuilder.with_cardinality(
+                Cardinality.many0, parse_number)
+        self.check_parse_number_with_zero_or_more(parse_many0_numbers)
+
+    def test_with_cardinality_many(self):
+        parse_many_numbers = TypeBuilder.with_cardinality(
+                Cardinality.many, parse_number)
+        self.check_parse_number_with_many(parse_many_numbers)
+
+    def test_parse_with_optional_and_named_fields(self):
+        parse_opt_number = TypeBuilder.with_optional(parse_number)
+        parse_opt_number.name = "Number?"
+
+        type_dict = build_type_dict([parse_opt_number, parse_number])
+        schema = "Numbers: {number1:Number?} {number2:Number}"
+        parser = parse.Parser(schema, type_dict)
+
+        # -- CASE: Optional number is present
+        result = parser.parse("Numbers: 34 12")
+        expected = dict(number1=34, number2=12)
+        self.assertIsNotNone(result)
+        self.assertEqual(result.named, expected)
+
+        # -- CASE: Optional number is missing
+        result = parser.parse("Numbers:  12")
+        expected = dict(number1=None, number2=12)
+        self.assertIsNotNone(result)
+        self.assertEqual(result.named, expected)
+
+    def test_parse_with_optional_and_unnamed_fields(self):
+        # -- ENSURE: Cardinality.optional.group_count is correct
+        # REQUIRES: Parser := parse_type.Parser with group_count support
+        parse_opt_number = TypeBuilder.with_optional(parse_number)
+        parse_opt_number.name = "Number?"
+
+        type_dict = build_type_dict([parse_opt_number, parse_number])
+        schema = "Numbers: {:Number?} {:Number}"
+        parser = Parser(schema, type_dict)
+
+        # -- CASE: Optional number is present
+        result = parser.parse("Numbers: 34 12")
+        expected = (34, 12)
+        self.assertIsNotNone(result)
+        self.assertEqual(result.fixed, tuple(expected))
+
+        # -- CASE: Optional number is missing
+        result = parser.parse("Numbers:  12")
+        expected = (None, 12)
+        self.assertIsNotNone(result)
+        self.assertEqual(result.fixed, tuple(expected))
+
+    def test_parse_with_many_and_unnamed_fields(self):
+        # -- ENSURE: Cardinality.one_or_more.group_count is correct
+        # REQUIRES: Parser := parse_type.Parser with group_count support
+        parse_many_numbers = TypeBuilder.with_many(parse_number)
+        parse_many_numbers.name = "Number+"
+
+        type_dict = build_type_dict([parse_many_numbers, parse_number])
+        schema = "Numbers: {:Number+} {:Number}"
+        parser = Parser(schema, type_dict)
+
+        # -- CASE:
+        result = parser.parse("Numbers: 1, 2, 3 42")
+        expected = ([1, 2, 3], 42)
+        self.assertIsNotNone(result)
+        self.assertEqual(result.fixed, tuple(expected))
+
+        result = parser.parse("Numbers: 3 43")
+        expected = ([ 3 ], 43)
+        self.assertIsNotNone(result)
+        self.assertEqual(result.fixed, tuple(expected))
+
+    def test_parse_with_many0_and_unnamed_fields(self):
+        # -- ENSURE: Cardinality.zero_or_more.group_count is correct
+        # REQUIRES: Parser := parse_type.Parser with group_count support
+        parse_many0_numbers = TypeBuilder.with_many0(parse_number)
+        parse_many0_numbers.name = "Number*"
+
+        type_dict = build_type_dict([parse_many0_numbers, parse_number])
+        schema = "Numbers: {:Number*} {:Number}"
+        parser = Parser(schema, type_dict)
+
+        # -- CASE: Optional numbers are present
+        result = parser.parse("Numbers: 1, 2, 3 42")
+        expected = ([1, 2, 3], 42)
+        self.assertIsNotNone(result)
+        self.assertEqual(result.fixed, tuple(expected))
+
+        # -- CASE: Optional numbers are missing := EMPTY-LIST
+        result = parser.parse("Numbers:  43")
+        expected = ([ ], 43)
+        self.assertIsNotNone(result)
+        self.assertEqual(result.fixed, tuple(expected))
+
+
+# class TestParserWithManyTypedFields(ParseTypeTestCase):
+
+    #parse_variant1 = TypeBuilder.make_variant([parse_number, parse_yesno])
+    #parse_variant1.name = "Number_or_YesNo"
+    #parse_variant2 = TypeBuilder.make_variant([parse_color, parse_person_choice])
+    #parse_variant2.name = "Color_or_PersonChoice"
+    #TYPE_CONVERTERS = [
+    #    parse_number,
+    #    parse_yesno,
+    #    parse_color,
+    #    parse_person_choice,
+    #    parse_variant1,
+    #    parse_variant2,
+    #]
+    #
+#    def test_parse_with_many_named_fields(self):
+#        type_dict = build_type_dict(self.TYPE_CONVERTERS)
+#        schema = """\
+#Number:   {number:Number}
+#YesNo:    {answer:YesNo}
+#Color:    {color:Color}
+#Person:   {person:PersonChoice}
+#Variant1: {variant1:Number_or_YesNo}
+#Variant2: {variant2:Color_or_PersonChoice}
+#"""
+#        parser = parse.Parser(schema, type_dict)
+#
+#        text = """\
+#Number:   12
+#YesNo:    yes
+#Color:    red
+#Person:   Alice
+#Variant1: 42
+#Variant2: Bob
+#"""
+#        expected = dict(
+#            number=12,
+#            answer=True,
+#            color=Color.red,
+#            person="Alice",
+#            variant1=42,
+#            variant2="Bob"
+#        )
+#
+#        result = parser.parse(text)
+#        self.assertIsNotNone(result)
+#        self.assertEqual(result.named, expected)
+
+#    def test_parse_with_many_unnamed_fields(self):
+#        type_dict = build_type_dict(self.TYPE_CONVERTERS)
+#        schema = """\
+#Number:   {:Number}
+#YesNo:    {:YesNo}
+#Color:    {:Color}
+#Person:   {:PersonChoice}
+#"""
+#        # -- OMIT: XFAIL, due to group_index delta counting => Parser problem.
+#        # Variant2: {:Color_or_PersonChoice}
+#        # Variant1: {:Number_or_YesNo}
+#        parser = parse.Parser(schema, type_dict)
+#
+#        text = """\
+#Number:   12
+#YesNo:    yes
+#Color:    red
+#Person:   Alice
+#"""
+#        # SKIP: Variant2: Bob
+#        # SKIP: Variant1: 42
+#        expected = [ 12, True, Color.red, "Alice", ] # -- SKIP: "Bob", 42 ]
+#
+#        result = parser.parse(text)
+#        self.assertIsNotNone(result)
+#        self.assertEqual(result.fixed, tuple(expected))
+
+
+
+# -----------------------------------------------------------------------------
+# MAIN:
+# -----------------------------------------------------------------------------
+if __name__ == '__main__':
+    unittest.main()
+
+
+# Copyright (c) 2012-2013 by Jens Engel (https://github.com/jenisys/parse_type)
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+#  The above copyright notice and this permission notice shall be included in
+#  all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
diff --git a/tests/test_cardinality_field.py b/tests/test_cardinality_field.py
new file mode 100755
index 0000000..3c59cb0
--- /dev/null
+++ b/tests/test_cardinality_field.py
@@ -0,0 +1,328 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+Test experiment for parse.
+Add cardinality format field after type:
+
+    "... {person:Person?} ..."   -- CARDINALITY: Zero or one,  0..1 (optional)
+    "... {persons:Person*} ..."  -- CARDINALITY: Zero or more, 0..N (many0)
+    "... {persons:Person+} ..."  -- CARDINALITY: One or more,  1..N (many)
+
+
+REQUIRES:
+    parse >= 1.5.3.1 ('pattern' attribute support and further extensions)
+
+STATUS:
+    IDEA, working prototype with patched parse module, but not accepted.
+"""
+
+from __future__ import absolute_import
+from .parse_type_test \
+    import TestCase, parse_number, unittest
+from .test_cardinality import CardinalityTypeBuilderTest
+from parse_type import Cardinality
+from parse_type.cardinality_field \
+    import CardinalityField, CardinalityFieldTypeBuilder, MissingTypeError
+
+
+# -------------------------------------------------------------------------
+# TEST CASE: TestParseTypeWithCardinalityField
+# -------------------------------------------------------------------------
+class TestCardinalityField(TestCase):
+    VALID_TYPE_NAMES = ["Number?", "Number*", "Number+"]
+    INVALID_TYPE_NAMES = ["?Invalid", "Inval*d", "In+valid"]
+
+    def test_pattern_chars(self):
+        for pattern_char in CardinalityField.pattern_chars:
+            self.assertIn(pattern_char, CardinalityField.from_char_map)
+
+    def test_to_from_char_map_symmetry(self):
+        for cardinality, char in CardinalityField.to_char_map.items():
+            self.assertEqual(cardinality, CardinalityField.from_char_map[char])
+        for char, cardinality in CardinalityField.from_char_map.items():
+            self.assertEqual(char, CardinalityField.to_char_map[cardinality])
+
+    def test_matches_type_name(self):
+        for type_name in self.VALID_TYPE_NAMES:
+            self.assertTrue(CardinalityField.matches_type(type_name))
+
+        for type_name in self.INVALID_TYPE_NAMES:
+            self.assertFalse(CardinalityField.matches_type(type_name))
+
+    def test_split_type__with_valid_special_names(self):
+        actual = CardinalityField.split_type("Color?")
+        self.assertEqual(actual, ("Color", Cardinality.optional))
+        self.assertEqual(actual, ("Color", Cardinality.zero_or_one))
+
+        actual = CardinalityField.split_type("Color+")
+        self.assertEqual(actual, ("Color", Cardinality.many))
+        self.assertEqual(actual, ("Color", Cardinality.one_or_more))
+
+        actual = CardinalityField.split_type("Color*")
+        self.assertEqual(actual, ("Color", Cardinality.many0))
+        self.assertEqual(actual, ("Color", Cardinality.zero_or_more))
+
+    def test_split_type__with_valid_special_names2(self):
+        for type_name in self.VALID_TYPE_NAMES:
+            self.assertTrue(CardinalityField.matches_type(type_name))
+            cardinality_char = type_name[-1]
+            expected_basename = type_name[:-1]
+            expected_cardinality = CardinalityField.from_char_map[cardinality_char]
+            expected = (expected_basename, expected_cardinality)
+            actual = CardinalityField.split_type(type_name)
+            self.assertEqual(actual, expected)
+
+    def test_split_type__with_cardinality_one(self):
+        actual = CardinalityField.split_type("Color")
+        self.assertEqual(actual, ("Color", Cardinality.one))
+
+    def test_split_type__with_invalid_names(self):
+        for type_name in self.INVALID_TYPE_NAMES:
+            expected = (type_name, Cardinality.one)
+            actual = CardinalityField.split_type(type_name)
+            self.assertEqual(actual, expected)
+            self.assertFalse(CardinalityField.matches_type(type_name))
+
+    def test_make_type__with_cardinality_one(self):
+        expected = "Number"
+        type_name = CardinalityField.make_type("Number", Cardinality.one)
+        self.assertEqual(type_name, expected)
+        self.assertFalse(CardinalityField.matches_type(type_name))
+
+    def test_make_type__with_cardinality_optional(self):
+        expected = "Number?"
+        type_name = CardinalityField.make_type("Number", Cardinality.optional)
+        self.assertEqual(type_name, expected)
+        self.assertTrue(CardinalityField.matches_type(type_name))
+
+        type_name2 = CardinalityField.make_type("Number", Cardinality.zero_or_one)
+        self.assertEqual(type_name2, expected)
+        self.assertEqual(type_name2, type_name)
+
+    def test_make_type__with_cardinality_many(self):
+        expected = "Number+"
+        type_name = CardinalityField.make_type("Number", Cardinality.many)
+        self.assertEqual(type_name, expected)
+        self.assertTrue(CardinalityField.matches_type(type_name))
+
+        type_name2 = CardinalityField.make_type("Number", Cardinality.one_or_more)
+        self.assertEqual(type_name2, expected)
+        self.assertEqual(type_name2, type_name)
+
+    def test_make_type__with_cardinality_many0(self):
+        expected = "Number*"
+        type_name = CardinalityField.make_type("Number", Cardinality.many0)
+        self.assertEqual(type_name, expected)
+        self.assertTrue(CardinalityField.matches_type(type_name))
+
+        type_name2 = CardinalityField.make_type("Number", Cardinality.zero_or_more)
+        self.assertEqual(type_name2, expected)
+        self.assertEqual(type_name2, type_name)
+
+    def test_split_type2make_type__symmetry_with_valid_names(self):
+        for type_name in self.VALID_TYPE_NAMES:
+            primary_name, cardinality = CardinalityField.split_type(type_name)
+            type_name2 = CardinalityField.make_type(primary_name, cardinality)
+            self.assertEqual(type_name, type_name2)
+
+    def test_split_type2make_type__symmetry_with_cardinality_one(self):
+        for type_name in self.INVALID_TYPE_NAMES:
+            primary_name, cardinality = CardinalityField.split_type(type_name)
+            type_name2 = CardinalityField.make_type(primary_name, cardinality)
+            self.assertEqual(type_name, primary_name)
+            self.assertEqual(type_name, type_name2)
+            self.assertEqual(cardinality, Cardinality.one)
+
+# -------------------------------------------------------------------------
+# TEST CASE:
+# -------------------------------------------------------------------------
+class TestCardinalityFieldTypeBuilder(CardinalityTypeBuilderTest):
+    INVALID_TYPE_DICT_DATA = [
+        (dict(),                        "empty type_dict"),
+        (dict(NumberX=parse_number),    "non-empty type_dict (wrong name)"),
+    ]
+
+    # -- UTILITY METHODS:
+    def generate_type_variants(self,type_name):
+        for pattern_char in CardinalityField.pattern_chars:
+            special_name = "%s%s" % (type_name.strip(), pattern_char)
+            self.assertTrue(CardinalityField.matches_type(special_name))
+            yield special_name
+
+    # -- METHOD: CardinalityFieldTypeBuilder.create_type_variant()
+    def test_create_type_variant__with_many_and_type_converter(self):
+        type_builder = CardinalityFieldTypeBuilder
+        parse_candidate = type_builder.create_type_variant("Number+",
+                                                type_converter=parse_number)
+        self.check_parse_number_with_many(parse_candidate, "Number+")
+
+    def test_create_type_variant__with_optional_and_type_dict(self):
+        type_builder = CardinalityFieldTypeBuilder
+        parse_candidate = type_builder.create_type_variant("Number?",
+                                                dict(Number=parse_number))
+        self.check_parse_number_with_optional(parse_candidate, "Number?")
+
+    def test_create_type_variant__with_many_and_type_dict(self):
+        type_builder = CardinalityFieldTypeBuilder
+        parse_candidate = type_builder.create_type_variant("Number+",
+                                                dict(Number=parse_number))
+        self.check_parse_number_with_many(parse_candidate, "Number+")
+
+    def test_create_type_variant__with_many0_and_type_dict(self):
+        type_builder = CardinalityFieldTypeBuilder
+        parse_candidate = type_builder.create_type_variant("Number*",
+                                                dict(Number=parse_number))
+        self.check_parse_number_with_many0(parse_candidate, "Number*")
+
+    def test_create_type_variant__can_create_all_variants(self):
+        type_builder = CardinalityFieldTypeBuilder
+        for special_name in self.generate_type_variants("Number"):
+            # -- CASE: type_converter
+            parse_candidate = type_builder.create_type_variant(special_name,
+                                                               parse_number)
+            self.assertTrue(callable(parse_candidate))
+
+            # -- CASE: type_dict
+            parse_candidate = type_builder.create_type_variant(special_name,
+                                                    dict(Number=parse_number))
+            self.assertTrue(callable(parse_candidate))
+
+    def test_create_type_variant__raises_error_with_invalid_type_name(self):
+        type_builder = CardinalityFieldTypeBuilder
+        for invalid_type_name in TestCardinalityField.INVALID_TYPE_NAMES:
+            with self.assertRaises(ValueError):
+                type_builder.create_type_variant(invalid_type_name,
+                                                 parse_number)
+
+    def test_create_type_variant__raises_error_with_missing_primary_type(self):
+        type_builder = CardinalityFieldTypeBuilder
+        for special_name in self.generate_type_variants("Number"):
+            for type_dict, description in self.INVALID_TYPE_DICT_DATA:
+                with self.assertRaises(MissingTypeError):
+                    type_builder.create_type_variant(special_name, type_dict)
+
+
+    # -- METHOD: CardinalityFieldTypeBuilder.create_type_variants()
+    def test_create_type_variants__all(self):
+        type_builder = CardinalityFieldTypeBuilder
+        special_names = ["Number?", "Number+", "Number*"]
+        type_dict = dict(Number=parse_number)
+        new_types = type_builder.create_type_variants(special_names, type_dict)
+        self.assertSequenceEqual(set(new_types.keys()), set(special_names))
+        self.assertEqual(len(new_types), 3)
+
+        parse_candidate = new_types["Number?"]
+        self.check_parse_number_with_optional(parse_candidate, "Number?")
+        parse_candidate = new_types["Number+"]
+        self.check_parse_number_with_many(parse_candidate, "Number+")
+        parse_candidate = new_types["Number*"]
+        self.check_parse_number_with_many0(parse_candidate, "Number*")
+
+    def test_create_type_variants__raises_error_with_invalid_type_name(self):
+        type_builder = CardinalityFieldTypeBuilder
+        for invalid_type_name in TestCardinalityField.INVALID_TYPE_NAMES:
+            type_dict = dict(Number=parse_number)
+            with self.assertRaises(ValueError):
+                type_names = [invalid_type_name]
+                type_builder.create_type_variants(type_names, type_dict)
+
+    def test_create_type_variants__raises_error_with_missing_primary_type(self):
+        type_builder = CardinalityFieldTypeBuilder
+        for special_name in self.generate_type_variants("Number"):
+            for type_dict, description in self.INVALID_TYPE_DICT_DATA:
+                self.assertNotIn("Number", type_dict)
+                with self.assertRaises(MissingTypeError):
+                    names = [special_name]
+                    type_builder.create_type_variants(names, type_dict)
+
+
+    # -- METHOD: CardinalityFieldTypeBuilder.create_missing_type_variants()
+    def test_create_missing_type_variants__all_missing(self):
+        type_builder = CardinalityFieldTypeBuilder
+        missing_names = ["Number?", "Number+", "Number*"]
+        new_types = type_builder.create_missing_type_variants(missing_names,
+                                                    dict(Number=parse_number))
+        self.assertSequenceEqual(set(new_types.keys()), set(missing_names))
+        self.assertEqual(len(new_types), 3)
+
+    def test_create_missing_type_variants__none_missing(self):
+        # -- PREPARE: Create all types and store them in the type_dict.
+        type_builder = CardinalityFieldTypeBuilder
+        type_names     = ["Number?", "Number+", "Number*"]
+        all_type_names = ["Number", "Number?", "Number+", "Number*"]
+        type_dict = dict(Number=parse_number)
+        new_types = type_builder.create_missing_type_variants(type_names,
+                                                                type_dict)
+        type_dict.update(new_types)
+        self.assertSequenceEqual(set(new_types.keys()), set(type_names))
+        self.assertSequenceEqual(set(type_dict.keys()), set(all_type_names))
+
+        # -- TEST: All special types are already stored in the type_dict.
+        new_types2 = type_builder.create_missing_type_variants(type_names,
+                                                                type_dict)
+        self.assertEqual(len(new_types2), 0)
+
+    def test_create_missing_type_variants__some_missing(self):
+        # -- PREPARE: Create some types and store them in the type_dict.
+        type_builder = CardinalityFieldTypeBuilder
+        special_names  = ["Number?", "Number+", "Number*"]
+        type_names1    = ["Number?", "Number*"]
+        type_names2    = special_names
+        type_dict = dict(Number=parse_number)
+        new_types = type_builder.create_missing_type_variants(type_names1,
+                                                                type_dict)
+        type_dict.update(new_types)
+        self.assertSequenceEqual(set(new_types.keys()), set(type_names1))
+        self.assertSequenceEqual(set(type_dict.keys()),
+                                set(["Number", "Number?", "Number*"]))
+
+        # -- TEST: All special types are already stored in the type_dict.
+        new_types2 = type_builder.create_missing_type_variants(type_names2,
+                                                                type_dict)
+        self.assertEqual(len(new_types2), 1)
+        self.assertSequenceEqual(set(new_types2.keys()), set(["Number+"]))
+
+    def test_create_missing_type_variants__raises_error_with_invalid_type_name(self):
+        type_builder = CardinalityFieldTypeBuilder
+        for invalid_type_name in TestCardinalityField.INVALID_TYPE_NAMES:
+            type_dict = dict(Number=parse_number)
+            with self.assertRaises(ValueError):
+                type_names = [invalid_type_name]
+                type_builder.create_missing_type_variants(type_names, type_dict)
+
+    def test_create_missing_type_variants__raises_error_with_missing_primary_type(self):
+        type_builder = CardinalityFieldTypeBuilder
+        for special_name in self.generate_type_variants("Number"):
+            for type_dict, description in self.INVALID_TYPE_DICT_DATA:
+                self.assertNotIn("Number", type_dict)
+                with self.assertRaises(MissingTypeError):
+                    names = [special_name]
+                    type_builder.create_missing_type_variants(names, type_dict)
+
+
+# -----------------------------------------------------------------------------
+# MAIN:
+# -----------------------------------------------------------------------------
+if __name__ == '__main__':
+    unittest.main()
+
+
+# Copyright (c) 2012-2013 by Jens Engel (https://github.com/jenisys/parse_type)
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+#  The above copyright notice and this permission notice shall be included in
+#  all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
diff --git a/tests/test_cardinality_field0.py b/tests/test_cardinality_field0.py
new file mode 100755
index 0000000..663a30a
--- /dev/null
+++ b/tests/test_cardinality_field0.py
@@ -0,0 +1,148 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+Test experiment for parse.
+Add cardinality format field after type:
+
+    "... {person:Person?} ..."   -- CARDINALITY: Zero or one,  0..1 (optional)
+    "... {persons:Person*} ..."  -- CARDINALITY: Zero or more, 0..N (many0)
+    "... {persons:Person+} ..."  -- CARDINALITY: One or more,  1..N (many)
+
+
+REQUIRES:
+    parse >= 1.5.3.1 ('pattern' attribute support and further extensions)
+
+STATUS:
+    IDEA, working prototype with patched parse module, but not accepted.
+"""
+
+from __future__ import absolute_import
+from .parse_type_test import ParseTypeTestCase
+from parse_type import TypeBuilder, build_type_dict
+import parse
+import unittest
+
+ENABLED = False
+if ENABLED:
+    # -------------------------------------------------------------------------
+    # TEST CASE: TestParseTypeWithCardinalityField
+    # -------------------------------------------------------------------------
+    class TestParseTypeWithCardinalityField(ParseTypeTestCase):
+        """
+        Test cardinality field part in parse type expressions, ala:
+
+            "... {person:Person?} ..."   -- OPTIONAL: cardinality is zero or one.
+            "... {persons:Person*} ..."  -- MANY0: cardinality is zero or more.
+            "... {persons:Person+} ..."  -- MANY:  cardinality is one or more.
+
+        NOTE:
+          * TypeBuilder has a similar and slightly more flexible feature.
+          * Cardinality field part works currently only for user-defined types.
+        """
+
+        def test_without_cardinality_field(self):
+            # -- IMPLICIT CARDINALITY: one
+            # -- SETUP:
+            parse_person = TypeBuilder.make_choice(["Alice", "Bob", "Charly"])
+            parse_person.name = "Person"      # For testing only.
+            extra_types = build_type_dict([ parse_person ])
+            schema = "One: {person:Person}"
+            parser = parse.Parser(schema, extra_types)
+
+            # -- PERFORM TESTS:
+            self.assert_match(parser, "One: Alice", "person", "Alice")
+            self.assert_match(parser, "One: Bob",   "person", "Bob")
+
+            # -- PARSE MISMATCH:
+            self.assert_mismatch(parser, "One: ", "person")        # Missing.
+            self.assert_mismatch(parser, "One: BAlice", "person")  # Similar1.
+            self.assert_mismatch(parser, "One: Boby", "person")    # Similar2.
+            self.assert_mismatch(parser, "One: a",    "person")    # INVALID ...
+
+        def test_cardinality_field_with_zero_or_one(self):
+            # -- SETUP:
+            parse_person = TypeBuilder.make_choice(["Alice", "Bob", "Charly"])
+            parse_person.name = "Person"      # For testing only.
+            extra_types = build_type_dict([ parse_person ])
+            schema = "Optional: {person:Person?}"
+            parser = parse.Parser(schema, extra_types)
+
+            # -- PERFORM TESTS:
+            self.assert_match(parser, "Optional: ",      "person", None)
+            self.assert_match(parser, "Optional: Alice", "person", "Alice")
+            self.assert_match(parser, "Optional: Bob",   "person", "Bob")
+
+            # -- PARSE MISMATCH:
+            self.assert_mismatch(parser, "Optional: Anna", "person")  # Similar1.
+            self.assert_mismatch(parser, "Optional: Boby", "person")  # Similar2.
+            self.assert_mismatch(parser, "Optional: a",    "person")  # INVALID ...
+
+        def test_cardinality_field_with_one_or_more(self):
+            # -- SETUP:
+            parse_person = TypeBuilder.make_choice(["Alice", "Bob", "Charly"])
+            parse_person.name = "Person"      # For testing only.
+            extra_types = build_type_dict([ parse_person ])
+            schema = "List: {persons:Person+}"
+            parser = parse.Parser(schema, extra_types)
+
+            # -- PERFORM TESTS:
+            self.assert_match(parser, "List: Alice", "persons", [ "Alice" ])
+            self.assert_match(parser, "List: Bob",   "persons", [ "Bob" ])
+            self.assert_match(parser, "List: Bob, Alice",
+                                      "persons", [ "Bob", "Alice" ])
+
+            # -- PARSE MISMATCH:
+            self.assert_mismatch(parser, "List: ",       "persons")  # Zero items.
+            self.assert_mismatch(parser, "List: BAlice", "persons")  # Unknown1.
+            self.assert_mismatch(parser, "List: Boby",   "persons")  # Unknown2.
+            self.assert_mismatch(parser, "List: Alice,", "persons")  # Trailing,
+            self.assert_mismatch(parser, "List: a, b",   "persons")  # List of...
+
+        def test_cardinality_field_with_zero_or_more(self):
+            # -- SETUP:
+            parse_person = TypeBuilder.make_choice(["Alice", "Bob", "Charly"])
+            parse_person.name = "Person"      # For testing only.
+            extra_types = build_type_dict([ parse_person ])
+            schema = "List: {persons:Person*}"
+            parser = parse.Parser(schema, extra_types)
+
+            # -- PERFORM TESTS:
+            self.assert_match(parser, "List: ", "persons", [ ])
+            self.assert_match(parser, "List: Alice", "persons", [ "Alice" ])
+            self.assert_match(parser, "List: Bob",   "persons", [ "Bob" ])
+            self.assert_match(parser, "List: Bob, Alice",
+                "persons", [ "Bob", "Alice" ])
+
+            # -- PARSE MISMATCH:
+            self.assert_mismatch(parser, "List:", "persons")         # Too short.
+            self.assert_mismatch(parser, "List: BAlice", "persons")  # Unknown1.
+            self.assert_mismatch(parser, "List: Boby",   "persons")  # Unknown2.
+            self.assert_mismatch(parser, "List: Alice,", "persons")  # Trailing,
+            self.assert_mismatch(parser, "List: a, b",   "persons")  # List of...
+
+# -----------------------------------------------------------------------------
+# MAIN:
+# -----------------------------------------------------------------------------
+if __name__ == '__main__':
+    unittest.main()
+
+
+# Copyright (c) 2012-2013 by Jens Engel (https://github.com/jenisys/parse_type)
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+#  The above copyright notice and this permission notice shall be included in
+#  all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
diff --git a/tests/test_cfparse.py b/tests/test_cfparse.py
new file mode 100644
index 0000000..732234e
--- /dev/null
+++ b/tests/test_cfparse.py
@@ -0,0 +1,187 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Test suite to test the :mod:`parse_type.cfparse` module.
+"""
+
+from __future__ import absolute_import
+from .parse_type_test import ParseTypeTestCase, parse_number, unittest
+from parse_type.cfparse import Parser
+from parse_type.cardinality_field \
+    import MissingTypeError, CardinalityFieldTypeBuilder
+
+
+# -----------------------------------------------------------------------------
+# TEST CASE:
+# -----------------------------------------------------------------------------
+class TestParser(ParseTypeTestCase):
+    """
+    Test :class:`parse_type.cfparse.Parser`.
+    Ensure that:
+
+      * parser can parse fields with CardinalityField part
+        even when these special type variants are not provided.
+
+      * parser creates missing type converter variants for CardinalityFields
+        as long as the primary type converter for cardinality=1 is provided.
+    """
+    SPECIAL_FIELD_TYPES_DATA = [
+        ("{number1:Number?}", ["Number?"]),
+        ("{number2:Number+}", ["Number+"]),
+        ("{number3:Number*}", ["Number*"]),
+        ("{number1:Number?} {number2:Number+} {number3:Number*}",
+                ["Number?", "Number+", "Number*"]),
+    ]
+
+    def test_parser__can_parse_normal_fields(self):
+        existing_types = dict(Number=parse_number)
+        schema = "Number: {number:Number}"
+        parser = Parser(schema, existing_types)
+        self.assert_match(parser, "Number: 42",  "number", 42)
+        self.assert_match(parser, "Number: 123", "number", 123)
+        self.assert_mismatch(parser, "Number: ")
+        self.assert_mismatch(parser, "Number: XXX")
+        self.assert_mismatch(parser, "Number: -123")
+
+    def test_parser__can_parse_cardinality_field_optional(self):
+        # -- CARDINALITY: 0..1 = zero_or_one = optional
+        existing_types = dict(Number=parse_number)
+        self.assertFalse("Number?" in existing_types)
+
+        # -- ENSURE: Missing type variant is created.
+        schema = "OptionalNumber: {number:Number?}"
+        parser = Parser(schema, existing_types)
+        self.assertTrue("Number?" in existing_types)
+
+        # -- ENSURE: Newly created type variant is usable.
+        self.assert_match(parser, "OptionalNumber: 42",  "number", 42)
+        self.assert_match(parser, "OptionalNumber: 123", "number", 123)
+        self.assert_match(parser, "OptionalNumber: ",    "number", None)
+        self.assert_mismatch(parser, "OptionalNumber:")
+        self.assert_mismatch(parser, "OptionalNumber: XXX")
+        self.assert_mismatch(parser, "OptionalNumber: -123")
+
+    def test_parser__can_parse_cardinality_field_many(self):
+        # -- CARDINALITY: 1..* = one_or_more = many
+        existing_types = dict(Number=parse_number)
+        self.assertFalse("Number+" in existing_types)
+
+        # -- ENSURE: Missing type variant is created.
+        schema = "List: {numbers:Number+}"
+        parser = Parser(schema, existing_types)
+        self.assertTrue("Number+" in existing_types)
+
+        # -- ENSURE: Newly created type variant is usable.
+        self.assert_match(parser, "List: 42",  "numbers", [42])
+        self.assert_match(parser, "List: 1, 2, 3", "numbers", [1, 2, 3])
+        self.assert_match(parser, "List: 4,5,6", "numbers", [4, 5, 6])
+        self.assert_mismatch(parser, "List: ")
+        self.assert_mismatch(parser, "List:")
+        self.assert_mismatch(parser, "List: XXX")
+        self.assert_mismatch(parser, "List: -123")
+
+    def test_parser__can_parse_cardinality_field_many_with_own_type_builder(self):
+        # -- CARDINALITY: 1..* = one_or_more = many
+        class MyCardinalityFieldTypeBuilder(CardinalityFieldTypeBuilder):
+            listsep = ';'
+
+        type_builder = MyCardinalityFieldTypeBuilder
+        existing_types = dict(Number=parse_number)
+        self.assertFalse("Number+" in existing_types)
+
+        # -- ENSURE: Missing type variant is created.
+        schema = "List: {numbers:Number+}"
+        parser = Parser(schema, existing_types, type_builder=type_builder)
+        self.assertTrue("Number+" in existing_types)
+
+        # -- ENSURE: Newly created type variant is usable.
+        # NOTE: Use other list separator.
+        self.assert_match(parser, "List: 42",  "numbers", [42])
+        self.assert_match(parser, "List: 1; 2; 3", "numbers", [1, 2, 3])
+        self.assert_match(parser, "List: 4;5;6", "numbers", [4, 5, 6])
+        self.assert_mismatch(parser, "List: ")
+        self.assert_mismatch(parser, "List:")
+        self.assert_mismatch(parser, "List: XXX")
+        self.assert_mismatch(parser, "List: -123")
+
+    def test_parser__can_parse_cardinality_field_many0(self):
+        # -- CARDINALITY: 0..* = zero_or_more = many0
+        existing_types = dict(Number=parse_number)
+        self.assertFalse("Number*" in existing_types)
+
+        # -- ENSURE: Missing type variant is created.
+        schema = "List0: {numbers:Number*}"
+        parser = Parser(schema, existing_types)
+        self.assertTrue("Number*" in existing_types)
+
+        # -- ENSURE: Newly created type variant is usable.
+        self.assert_match(parser, "List0: 42",  "numbers", [42])
+        self.assert_match(parser, "List0: 1, 2, 3", "numbers", [1, 2, 3])
+        self.assert_match(parser, "List0: ",  "numbers", [])
+        self.assert_mismatch(parser, "List0:")
+        self.assert_mismatch(parser, "List0: XXX")
+        self.assert_mismatch(parser, "List0: -123")
+
+
+    def test_create_missing_types__without_cardinality_fields_in_schema(self):
+        schemas = ["{}", "{:Number}", "{number3}", "{number4:Number}", "XXX"]
+        existing_types = {}
+        for schema in schemas:
+            new_types = Parser.create_missing_types(schema, existing_types)
+            self.assertEqual(len(new_types), 0)
+            self.assertEqual(new_types, {})
+
+    def test_create_missing_types__raises_error_if_primary_type_is_missing(self):
+        # -- HINT: primary type is not provided in type_dict (existing_types)
+        existing_types = {}
+        for schema, missing_types in self.SPECIAL_FIELD_TYPES_DATA:
+            with self.assertRaises(MissingTypeError):
+                Parser.create_missing_types(schema, existing_types)
+
+    def test_create_missing_types__if_special_types_are_missing(self):
+        existing_types = dict(Number=parse_number)
+        for schema, missing_types in self.SPECIAL_FIELD_TYPES_DATA:
+            new_types = Parser.create_missing_types(schema, existing_types)
+            self.assertSequenceEqual(set(new_types.keys()), set(missing_types))
+
+    def test_create_missing_types__if_special_types_exist(self):
+        existing_types = dict(Number=parse_number)
+        for schema, missing_types in self.SPECIAL_FIELD_TYPES_DATA:
+            # -- FIRST STEP: Prepare
+            new_types = Parser.create_missing_types(schema, existing_types)
+            self.assertGreater(len(new_types), 0)
+
+            # -- SECOND STEP: Now all needed special types should exist.
+            existing_types2 = existing_types.copy()
+            existing_types2.update(new_types)
+            new_types2 = Parser.create_missing_types(schema, existing_types2)
+            self.assertEqual(len(new_types2), 0)
+
+
+# -----------------------------------------------------------------------------
+# MAIN:
+# -----------------------------------------------------------------------------
+if __name__ == '__main__':
+    unittest.main()
+
+
+# Copyright (c) 2012-2013 by Jens Engel (https://github.com/jenisys/parse_type)
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+#  The above copyright notice and this permission notice shall be included in
+#  all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
diff --git a/tests/test_parse.py b/tests/test_parse.py
new file mode 100644
index 0000000..df3b967
--- /dev/null
+++ b/tests/test_parse.py
@@ -0,0 +1,1013 @@
+# -*- encoding: utf8 -*-
+# -- BASED-ON: https://github.com/r1chardj0n3s/parse/test_parse.py
+# VERSION:  parse 1.12.0
+# Same as original file but uses bundled :mod:`parse_type.parse` module
+# instead of :mod:`parse` module
+#
+# NOTE: Part of the tests are/were provided by jenisys.
+# -- ORIGINAL-CODE STARTS-HERE ------------------------------------------------
+'''Test suite for parse.py
+
+This code is copyright 2011 eKit.com Inc (http://www.ekit.com/)
+See the end of the source file for the license of use.
+'''
+
+from __future__ import absolute_import
+import unittest
+try:
+    import unittest2 as unittest
+except ImportError:
+    import unittest
+# -- ADAPTATION-END
+from datetime import datetime, time
+from decimal import Decimal
+import re
+
+# -- EXTENSION:
+import os
+PARSE_MODULE = os.environ.get("PARSE_TYPE_PARSE_MODULE", "parse_type.parse")
+if PARSE_MODULE.startswith("parse_type"):
+    # -- USE VENDOR MODULE: parse_type.parse (probably older than the original)
+    from parse_type import parse
+else:
+    # -- USE ORIGINAL MODULE: parse
+    import parse
+# -- EXTENSION-END
+
+
+class TestPattern(unittest.TestCase):
+    def _test_expression(self, format, expression):
+        self.assertEqual(parse.Parser(format)._expression, expression)
+
+    def test_braces(self):
+        # pull a simple string out of another string
+        self._test_expression('{{ }}', r'\{ \}')
+
+    def test_fixed(self):
+        # pull a simple string out of another string
+        self._test_expression('{}', r'(.+?)')
+        self._test_expression('{} {}', r'(.+?) (.+?)')
+
+    def test_named(self):
+        # pull a named string out of another string
+        self._test_expression('{name}', r'(?P<name>.+?)')
+        self._test_expression('{name} {other}',
+            r'(?P<name>.+?) (?P<other>.+?)')
+
+    def test_named_typed(self):
+        # pull a named string out of another string
+        self._test_expression('{name:w}', r'(?P<name>\w+)')
+        self._test_expression('{name:w} {other:w}',
+            r'(?P<name>\w+) (?P<other>\w+)')
+
+    def test_beaker(self):
+        # skip some trailing whitespace
+        self._test_expression('{:<}', r'(.+?) *')
+
+    def test_left_fill(self):
+        # skip some trailing periods
+        self._test_expression('{:.<}', r'(.+?)\.*')
+
+    def test_bird(self):
+        # skip some trailing whitespace
+        self._test_expression('{:>}', r' *(.+?)')
+
+    def test_center(self):
+        # skip some surrounding whitespace
+        self._test_expression('{:^}', r' *(.+?) *')
+
+    def test_format_variety(self):
+        def _(fmt, matches):
+            d = parse.extract_format(fmt, {'spam': 'spam'})
+            for k in matches:
+                self.assertEqual(d.get(k), matches[k],
+                    'm["%s"]=%r, expect %r' % (k, d.get(k), matches[k]))
+
+        for t in '%obxegfdDwWsS':
+            _(t, dict(type=t))
+            _('10' + t, dict(type=t, width='10'))
+        _('05d', dict(type='d', width='5', zero=True))
+        _('<', dict(align='<'))
+        _('.<', dict(align='<', fill='.'))
+        _('>', dict(align='>'))
+        _('.>', dict(align='>', fill='.'))
+        _('^', dict(align='^'))
+        _('.^', dict(align='^', fill='.'))
+        _('x=d', dict(type='d', align='=', fill='x'))
+        _('d', dict(type='d'))
+        _('ti', dict(type='ti'))
+        _('spam', dict(type='spam'))
+
+        _('.^010d', dict(type='d', width='10', align='^', fill='.',
+            zero=True))
+        _('.2f', dict(type='f', precision='2'))
+        _('10.2f', dict(type='f', width='10', precision='2'))
+
+    def test_dot_separated_fields(self):
+        # this should just work and provide the named value
+        res = parse.parse('{hello.world}_{jojo.foo.baz}_{simple}', 'a_b_c')
+        assert res.named['hello.world'] == 'a'
+        assert res.named['jojo.foo.baz'] == 'b'
+        assert res.named['simple'] == 'c'
+
+    def test_dict_style_fields(self):
+        res = parse.parse('{hello[world]}_{hello[foo][baz]}_{simple}', 'a_b_c')
+        assert res.named['hello']['world'] == 'a'
+        assert res.named['hello']['foo']['baz'] == 'b'
+        assert res.named['simple'] == 'c'
+
+    def test_dot_separated_fields_name_collisions(self):
+        # this should just work and provide the named value
+        res = parse.parse('{a_.b}_{a__b}_{a._b}_{a___b}', 'a_b_c_d')
+        assert res.named['a_.b'] == 'a'
+        assert res.named['a__b'] == 'b'
+        assert res.named['a._b'] == 'c'
+        assert res.named['a___b'] == 'd'
+
+    def test_invalid_groupnames_are_handled_gracefully(self):
+        self.assertRaises(NotImplementedError, parse.parse,
+            "{hello['world']}", "doesn't work")
+
+
+class TestResult(unittest.TestCase):
+    def test_fixed_access(self):
+        r = parse.Result((1, 2), {}, None)
+        self.assertEqual(r[0], 1)
+        self.assertEqual(r[1], 2)
+        self.assertRaises(IndexError, r.__getitem__, 2)
+        self.assertRaises(KeyError, r.__getitem__, 'spam')
+
+    def test_named_access(self):
+        r = parse.Result((), {'spam': 'ham'}, None)
+        self.assertEqual(r['spam'], 'ham')
+        self.assertRaises(KeyError, r.__getitem__, 'ham')
+        self.assertRaises(IndexError, r.__getitem__, 0)
+
+    def test_contains(self):
+        r = parse.Result(('cat',), {'spam': 'ham'}, None)
+        self.assertTrue('spam' in r)
+        self.assertTrue('cat' not in r)
+        self.assertTrue('ham' not in r)
+
+
+class TestParse(unittest.TestCase):
+    def test_no_match(self):
+        # string does not match format
+        self.assertEqual(parse.parse('{{hello}}', 'hello'), None)
+
+    def test_nothing(self):
+        # do no actual parsing
+        r = parse.parse('{{hello}}', '{hello}')
+        self.assertEqual(r.fixed, ())
+        self.assertEqual(r.named, {})
+
+    def test_no_evaluate_result(self):
+        # pull a fixed value out of string
+        match = parse.parse('hello {}', 'hello world', evaluate_result=False)
+        r = match.evaluate_result()
+        self.assertEqual(r.fixed, ('world', ))
+
+    def test_regular_expression(self):
+        # match an actual regular expression
+        s = r'^(hello\s[wW]{}!+.*)$'
+        e = s.replace('{}', 'orld')
+        r = parse.parse(s, e)
+        self.assertEqual(r.fixed, ('orld',))
+        e = s.replace('{}', '.*?')
+        r = parse.parse(s, e)
+        self.assertEqual(r.fixed, ('.*?',))
+
+    def test_question_mark(self):
+        # issue9: make sure a ? in the parse string is handled correctly
+        r = parse.parse('"{}"?', '"teststr"?')
+        self.assertEqual(r[0], 'teststr')
+
+    def test_pipe(self):
+        # issue22: make sure a | in the parse string is handled correctly
+        r = parse.parse('| {}', '| teststr')
+        self.assertEqual(r[0], 'teststr')
+
+    def test_unicode(self):
+        # issue29: make sure unicode is parsable
+        r = parse.parse('{}', u't€ststr')
+        self.assertEqual(r[0], u't€ststr')
+
+    def test_hexadecimal(self):
+        # issue42: make sure bare hexadecimal isn't matched as "digits"
+        r = parse.parse('{:d}', 'abcdef')
+        self.assertIsNone(r)
+
+    def test_fixed(self):
+        # pull a fixed value out of string
+        r = parse.parse('hello {}', 'hello world')
+        self.assertEqual(r.fixed, ('world', ))
+
+    def test_left(self):
+        # pull left-aligned text out of string
+        r = parse.parse('{:<} world', 'hello       world')
+        self.assertEqual(r.fixed, ('hello', ))
+
+    def test_right(self):
+        # pull right-aligned text out of string
+        r = parse.parse('hello {:>}', 'hello       world')
+        self.assertEqual(r.fixed, ('world', ))
+
+    def test_center(self):
+        # pull center-aligned text out of string
+        r = parse.parse('hello {:^} world', 'hello  there     world')
+        self.assertEqual(r.fixed, ('there', ))
+
+    def test_typed(self):
+        # pull a named, typed values out of string
+        r = parse.parse('hello {:d} {:w}', 'hello 12 people')
+        self.assertEqual(r.fixed, (12, 'people'))
+        r = parse.parse('hello {:w} {:w}', 'hello 12 people')
+        self.assertEqual(r.fixed, ('12', 'people'))
+
+    def test_precision(self):
+        # pull a float out of a string
+        r = parse.parse('Pi = {:.7f}', 'Pi = 3.1415926')
+        self.assertEqual(r.fixed, (3.1415926, ))
+        r = parse.parse('Pi/10 = {:8.5f}', 'Pi/10 =  0.31415')
+        self.assertEqual(r.fixed, (0.31415, ))
+
+    def test_precision_fail(self):
+        # floats must have a leading zero
+        # IS THIS CORRECT?
+        r = parse.parse('Pi/10 = {:8.5f}', 'Pi/10 = .31415')
+        self.assertEqual(r, None)
+
+    def test_custom_type(self):
+        # use a custom type
+        r = parse.parse('{:shouty} {:spam}', 'hello world',
+            dict(shouty=lambda s: s.upper(),
+                spam=lambda s: ''.join(reversed(s))))
+        self.assertEqual(r.fixed, ('HELLO', 'dlrow'))
+        r = parse.parse('{:d}', '12', dict(d=lambda s: int(s) * 2))
+        self.assertEqual(r.fixed, (24,))
+        r = parse.parse('{:d}', '12')
+        self.assertEqual(r.fixed, (12,))
+
+    def test_typed_fail(self):
+        # pull a named, typed values out of string
+        self.assertEqual(parse.parse('hello {:d} {:w}', 'hello people 12'),
+            None)
+
+    def test_named(self):
+        # pull a named value out of string
+        r = parse.parse('hello {name}', 'hello world')
+        self.assertEqual(r.named, {'name': 'world'})
+
+    def test_named_repeated(self):
+        # test a name may be repeated
+        r = parse.parse('{n} {n}', 'x x')
+        self.assertEqual(r.named, {'n': 'x'})
+
+    def test_named_repeated_type(self):
+        # test a name may be repeated with type conversion
+        r = parse.parse('{n:d} {n:d}', '1 1')
+        self.assertEqual(r.named, {'n': 1})
+
+    def test_named_repeated_fail_value(self):
+        # test repeated name fails if value mismatches
+        r = parse.parse('{n} {n}', 'x y')
+        self.assertEqual(r, None)
+
+    def test_named_repeated_type_fail_value(self):
+        # test repeated name with type conversion fails if value mismatches
+        r = parse.parse('{n:d} {n:d}', '1 2')
+        self.assertEqual(r, None)
+
+    def test_named_repeated_type_mismatch(self):
+        # test repeated name with mismatched type
+        self.assertRaises(parse.RepeatedNameError, parse.compile,
+            '{n:d} {n:w}')
+
+    def test_mixed(self):
+        # pull a fixed and named values out of string
+        r = parse.parse('hello {} {name} {} {spam}',
+            'hello world and other beings')
+        self.assertEqual(r.fixed, ('world', 'other'))
+        self.assertEqual(r.named, dict(name='and', spam='beings'))
+
+    def test_named_typed(self):
+        # pull a named, typed values out of string
+        r = parse.parse('hello {number:d} {things}', 'hello 12 people')
+        self.assertEqual(r.named, dict(number=12, things='people'))
+        r = parse.parse('hello {number:w} {things}', 'hello 12 people')
+        self.assertEqual(r.named, dict(number='12', things='people'))
+
+    def test_named_aligned_typed(self):
+        # pull a named, typed values out of string
+        r = parse.parse('hello {number:<d} {things}', 'hello 12      people')
+        self.assertEqual(r.named, dict(number=12, things='people'))
+        r = parse.parse('hello {number:>d} {things}', 'hello      12 people')
+        self.assertEqual(r.named, dict(number=12, things='people'))
+        r = parse.parse('hello {number:^d} {things}',
+            'hello      12      people')
+        self.assertEqual(r.named, dict(number=12, things='people'))
+
+    def test_multiline(self):
+        r = parse.parse('hello\n{}\nworld', 'hello\nthere\nworld')
+        self.assertEqual(r.fixed[0], 'there')
+
+    def test_spans(self):
+        # test the string sections our fields come from
+        string = 'hello world'
+        r = parse.parse('hello {}', string)
+        self.assertEqual(r.spans, {0: (6, 11)})
+        start, end = r.spans[0]
+        self.assertEqual(string[start:end], r.fixed[0])
+
+        string = 'hello     world'
+        r = parse.parse('hello {:>}', string)
+        self.assertEqual(r.spans, {0: (10, 15)})
+        start, end = r.spans[0]
+        self.assertEqual(string[start:end], r.fixed[0])
+
+        string = 'hello 0x12 world'
+        r = parse.parse('hello {val:x} world', string)
+        self.assertEqual(r.spans, {'val': (6, 10)})
+        start, end = r.spans['val']
+        self.assertEqual(string[start:end], '0x%x' % r.named['val'])
+
+        string = 'hello world and other beings'
+        r = parse.parse('hello {} {name} {} {spam}', string)
+        self.assertEqual(r.spans, {0: (6, 11), 'name': (12, 15),
+            1: (16, 21), 'spam': (22, 28)})
+
+    def test_numbers(self):
+        # pull a numbers out of a string
+        def y(fmt, s, e, str_equals=False):
+            p = parse.compile(fmt)
+            r = p.parse(s)
+            if r is None:
+                self.fail('%r (%r) did not match %r' % (fmt, p._expression, s))
+            r = r.fixed[0]
+            if str_equals:
+                self.assertEqual(str(r), str(e),
+                    '%r found %r in %r, not %r' % (fmt, r, s, e))
+            else:
+                self.assertEqual(r, e,
+                    '%r found %r in %r, not %r' % (fmt, r, s, e))
+
+        def n(fmt, s, e):
+            if parse.parse(fmt, s) is not None:
+                self.fail('%r matched %r' % (fmt, s))
+        y('a {:d} b', 'a 0 b', 0)
+        y('a {:d} b', 'a 12 b', 12)
+        y('a {:5d} b', 'a    12 b', 12)
+        y('a {:5d} b', 'a   -12 b', -12)
+        y('a {:d} b', 'a -12 b', -12)
+        y('a {:d} b', 'a +12 b', 12)
+        y('a {:d} b', 'a  12 b', 12)
+        y('a {:d} b', 'a 0b1000 b', 8)
+        y('a {:d} b', 'a 0o1000 b', 512)
+        y('a {:d} b', 'a 0x1000 b', 4096)
+        y('a {:d} b', 'a 0xabcdef b', 0xabcdef)
+
+        y('a {:%} b', 'a 100% b', 1)
+        y('a {:%} b', 'a 50% b', .5)
+        y('a {:%} b', 'a 50.1% b', .501)
+
+        y('a {:n} b', 'a 100 b', 100)
+        y('a {:n} b', 'a 1,000 b', 1000)
+        y('a {:n} b', 'a 1.000 b', 1000)
+        y('a {:n} b', 'a -1,000 b', -1000)
+        y('a {:n} b', 'a 10,000 b', 10000)
+        y('a {:n} b', 'a 100,000 b', 100000)
+        n('a {:n} b', 'a 100,00 b', None)
+        y('a {:n} b', 'a 100.000 b', 100000)
+        y('a {:n} b', 'a 1.000.000 b', 1000000)
+
+        y('a {:f} b', 'a 12.0 b', 12.0)
+        y('a {:f} b', 'a -12.1 b', -12.1)
+        y('a {:f} b', 'a +12.1 b', 12.1)
+        n('a {:f} b', 'a 12 b', None)
+
+        y('a {:e} b', 'a 1.0e10 b', 1.0e10)
+        y('a {:e} b', 'a 1.0E10 b', 1.0e10)
+        y('a {:e} b', 'a 1.10000e10 b', 1.1e10)
+        y('a {:e} b', 'a 1.0e-10 b', 1.0e-10)
+        y('a {:e} b', 'a 1.0e+10 b', 1.0e10)
+        # can't actually test this one on values 'cos nan != nan
+        y('a {:e} b', 'a nan b', float('nan'), str_equals=True)
+        y('a {:e} b', 'a NAN b', float('nan'), str_equals=True)
+        y('a {:e} b', 'a inf b', float('inf'))
+        y('a {:e} b', 'a +inf b', float('inf'))
+        y('a {:e} b', 'a -inf b', float('-inf'))
+        y('a {:e} b', 'a INF b', float('inf'))
+        y('a {:e} b', 'a +INF b', float('inf'))
+        y('a {:e} b', 'a -INF b', float('-inf'))
+
+        y('a {:g} b', 'a 1 b', 1)
+        y('a {:g} b', 'a 1e10 b', 1e10)
+        y('a {:g} b', 'a 1.0e10 b', 1.0e10)
+        y('a {:g} b', 'a 1.0E10 b', 1.0e10)
+
+        y('a {:b} b', 'a 1000 b', 8)
+        y('a {:b} b', 'a 0b1000 b', 8)
+        y('a {:o} b', 'a 12345670 b', int('12345670', 8))
+        y('a {:o} b', 'a 0o12345670 b', int('12345670', 8))
+        y('a {:x} b', 'a 1234567890abcdef b', 0x1234567890abcdef)
+        y('a {:x} b', 'a 1234567890ABCDEF b', 0x1234567890ABCDEF)
+        y('a {:x} b', 'a 0x1234567890abcdef b', 0x1234567890abcdef)
+        y('a {:x} b', 'a 0x1234567890ABCDEF b', 0x1234567890ABCDEF)
+
+        y('a {:05d} b', 'a 00001 b', 1)
+        y('a {:05d} b', 'a -00001 b', -1)
+        y('a {:05d} b', 'a +00001 b', 1)
+        y('a {:02d} b', 'a 10 b', 10)
+
+        y('a {:=d} b', 'a 000012 b', 12)
+        y('a {:x=5d} b', 'a xxx12 b', 12)
+        y('a {:x=5d} b', 'a -xxx12 b', -12)
+
+    def test_hex_looks_like_binary_issue65(self):
+        r = parse.parse('a {:x} b', 'a 0B b')
+        self.assertEqual(r[0], 11)
+        r = parse.parse('a {:x} b', 'a 0B1 b')
+        self.assertEqual(r[0], 1)
+
+    def test_two_datetimes(self):
+        r = parse.parse('a {:ti} {:ti} b', 'a 1997-07-16 2012-08-01 b')
+        self.assertEqual(len(r.fixed), 2)
+        self.assertEqual(r[0], datetime(1997, 7, 16))
+        self.assertEqual(r[1], datetime(2012, 8, 1))
+
+    def test_datetimes(self):
+        def y(fmt, s, e, tz=None):
+            p = parse.compile(fmt)
+            r = p.parse(s)
+            if r is None:
+                self.fail('%r (%r) did not match %r' % (fmt, p._expression, s))
+            r = r.fixed[0]
+            try:
+                self.assertEqual(r, e,
+                    '%r found %r in %r, not %r' % (fmt, r, s, e))
+            except ValueError:
+                self.fail('%r found %r in %r, not %r' % (fmt, r, s, e))
+
+            if tz is not None:
+                self.assertEqual(r.tzinfo, tz,
+                    '%r found TZ %r in %r, not %r' % (fmt, r.tzinfo, s, e))
+
+        def n(fmt, s, e):
+            if parse.parse(fmt, s) is not None:
+                self.fail('%r matched %r' % (fmt, s))
+
+        utc = parse.FixedTzOffset(0, 'UTC')
+        aest = parse.FixedTzOffset(10 * 60, '+1000')
+        tz60 = parse.FixedTzOffset(60, '+01:00')
+
+        # ISO 8660 variants
+        # YYYY-MM-DD (eg 1997-07-16)
+        y('a {:ti} b', 'a 1997-07-16 b', datetime(1997, 7, 16))
+
+        # YYYY-MM-DDThh:mmTZD (eg 1997-07-16T19:20+01:00)
+        y('a {:ti} b', 'a 1997-07-16 19:20 b',
+            datetime(1997, 7, 16, 19, 20, 0))
+        y('a {:ti} b', 'a 1997-07-16T19:20 b',
+            datetime(1997, 7, 16, 19, 20, 0))
+        y('a {:ti} b', 'a 1997-07-16T19:20Z b',
+            datetime(1997, 7, 16, 19, 20, tzinfo=utc))
+        y('a {:ti} b', 'a 1997-07-16T19:20+0100 b',
+            datetime(1997, 7, 16, 19, 20, tzinfo=tz60))
+        y('a {:ti} b', 'a 1997-07-16T19:20+01:00 b',
+            datetime(1997, 7, 16, 19, 20, tzinfo=tz60))
+        y('a {:ti} b', 'a 1997-07-16T19:20 +01:00 b',
+            datetime(1997, 7, 16, 19, 20, tzinfo=tz60))
+
+        # YYYY-MM-DDThh:mm:ssTZD (eg 1997-07-16T19:20:30+01:00)
+        y('a {:ti} b', 'a 1997-07-16 19:20:30 b',
+            datetime(1997, 7, 16, 19, 20, 30))
+        y('a {:ti} b', 'a 1997-07-16T19:20:30 b',
+            datetime(1997, 7, 16, 19, 20, 30))
+        y('a {:ti} b', 'a 1997-07-16T19:20:30Z b',
+            datetime(1997, 7, 16, 19, 20, 30, tzinfo=utc))
+        y('a {:ti} b', 'a 1997-07-16T19:20:30+01:00 b',
+            datetime(1997, 7, 16, 19, 20, 30, tzinfo=tz60))
+        y('a {:ti} b', 'a 1997-07-16T19:20:30 +01:00 b',
+            datetime(1997, 7, 16, 19, 20, 30, tzinfo=tz60))
+
+        # YYYY-MM-DDThh:mm:ss.sTZD (eg 1997-07-16T19:20:30.45+01:00)
+        y('a {:ti} b', 'a 1997-07-16 19:20:30.500000 b',
+            datetime(1997, 7, 16, 19, 20, 30, 500000))
+        y('a {:ti} b', 'a 1997-07-16T19:20:30.500000 b',
+            datetime(1997, 7, 16, 19, 20, 30, 500000))
+        y('a {:ti} b', 'a 1997-07-16T19:20:30.5Z b',
+            datetime(1997, 7, 16, 19, 20, 30, 500000, tzinfo=utc))
+        y('a {:ti} b', 'a 1997-07-16T19:20:30.5+01:00 b',
+            datetime(1997, 7, 16, 19, 20, 30, 500000, tzinfo=tz60))
+
+        aest_d = datetime(2011, 11, 21, 10, 21, 36, tzinfo=aest)
+        dt = datetime(2011, 11, 21, 10, 21, 36)
+        dt00 = datetime(2011, 11, 21, 10, 21)
+        d = datetime(2011, 11, 21)
+
+        # te   RFC2822 e-mail format        datetime
+        y('a {:te} b', 'a Mon, 21 Nov 2011 10:21:36 +1000 b', aest_d)
+        y('a {:te} b', 'a Mon, 21 Nov 2011 10:21:36 +10:00 b', aest_d)
+        y('a {:te} b', 'a 21 Nov 2011 10:21:36 +1000 b', aest_d)
+
+        # tg   global (day/month) format datetime
+        y('a {:tg} b', 'a 21/11/2011 10:21:36 AM +1000 b', aest_d)
+        y('a {:tg} b', 'a 21/11/2011 10:21:36 AM +10:00 b', aest_d)
+        y('a {:tg} b', 'a 21-11-2011 10:21:36 AM +1000 b', aest_d)
+        y('a {:tg} b', 'a 21/11/2011 10:21:36 +1000 b', aest_d)
+        y('a {:tg} b', 'a 21/11/2011 10:21:36 b', dt)
+        y('a {:tg} b', 'a 21/11/2011 10:21 b', dt00)
+        y('a {:tg} b', 'a 21-11-2011 b', d)
+        y('a {:tg} b', 'a 21-Nov-2011 10:21:36 AM +1000 b', aest_d)
+        y('a {:tg} b', 'a 21-November-2011 10:21:36 AM +1000 b', aest_d)
+
+        # ta   US (month/day) format     datetime
+        y('a {:ta} b', 'a 11/21/2011 10:21:36 AM +1000 b', aest_d)
+        y('a {:ta} b', 'a 11/21/2011 10:21:36 AM +10:00 b', aest_d)
+        y('a {:ta} b', 'a 11-21-2011 10:21:36 AM +1000 b', aest_d)
+        y('a {:ta} b', 'a 11/21/2011 10:21:36 +1000 b', aest_d)
+        y('a {:ta} b', 'a 11/21/2011 10:21:36 b', dt)
+        y('a {:ta} b', 'a 11/21/2011 10:21 b', dt00)
+        y('a {:ta} b', 'a 11-21-2011 b', d)
+        y('a {:ta} b', 'a Nov-21-2011 10:21:36 AM +1000 b', aest_d)
+        y('a {:ta} b', 'a November-21-2011 10:21:36 AM +1000 b', aest_d)
+        y('a {:ta} b', 'a November-21-2011 b', d)
+
+        # ts   Linux System log format        datetime
+        y('a {:ts} b', 'a Nov 21 10:21:36 b',  datetime(datetime.today().year, 11, 21, 10, 21, 36))
+        y('a {:ts} b', 'a Nov  1 10:21:36 b',  datetime(datetime.today().year, 11, 1, 10, 21, 36))
+        y('a {:ts} b', 'a Nov  1 03:21:36 b',  datetime(datetime.today().year, 11, 1, 3, 21, 36))
+
+        # th   HTTP log format date/time                   datetime
+        y('a {:th} b', 'a 21/Nov/2011:10:21:36 +1000 b', aest_d)
+        y('a {:th} b', 'a 21/Nov/2011:10:21:36 +10:00 b', aest_d)
+
+        d = datetime(2011, 11, 21, 10, 21, 36)
+
+        # tc   ctime() format           datetime
+        y('a {:tc} b', 'a Mon Nov 21 10:21:36 2011 b', d)
+
+        t530 = parse.FixedTzOffset(-5 * 60 - 30, '-5:30')
+        t830 = parse.FixedTzOffset(-8 * 60 - 30, '-8:30')
+
+        # tt   Time                                        time
+        y('a {:tt} b', 'a 10:21:36 AM +1000 b', time(10, 21, 36, tzinfo=aest))
+        y('a {:tt} b', 'a 10:21:36 AM +10:00 b', time(10, 21, 36, tzinfo=aest))
+        y('a {:tt} b', 'a 10:21:36 AM b', time(10, 21, 36))
+        y('a {:tt} b', 'a 10:21:36 PM b', time(22, 21, 36))
+        y('a {:tt} b', 'a 10:21:36 b', time(10, 21, 36))
+        y('a {:tt} b', 'a 10:21 b', time(10, 21))
+        y('a {:tt} b', 'a 10:21:36 PM -5:30 b', time(22, 21, 36, tzinfo=t530))
+        y('a {:tt} b', 'a 10:21:36 PM -530 b', time(22, 21, 36, tzinfo=t530))
+        y('a {:tt} b', 'a 10:21:36 PM -05:30 b', time(22, 21, 36, tzinfo=t530))
+        y('a {:tt} b', 'a 10:21:36 PM -0530 b', time(22, 21, 36, tzinfo=t530))
+        y('a {:tt} b', 'a 10:21:36 PM -08:30 b', time(22, 21, 36, tzinfo=t830))
+        y('a {:tt} b', 'a 10:21:36 PM -0830 b', time(22, 21, 36, tzinfo=t830))
+
+    def test_datetime_group_count(self):
+        # test we increment the group count correctly for datetimes
+        r = parse.parse('{:ti} {}', '1972-01-01 spam')
+        self.assertEqual(r.fixed[1], 'spam')
+        r = parse.parse('{:tg} {}', '1-1-1972 spam')
+        self.assertEqual(r.fixed[1], 'spam')
+        r = parse.parse('{:ta} {}', '1-1-1972 spam')
+        self.assertEqual(r.fixed[1], 'spam')
+        r = parse.parse('{:th} {}', '21/Nov/2011:10:21:36 +1000 spam')
+        self.assertEqual(r.fixed[1], 'spam')
+        r = parse.parse('{:te} {}', '21 Nov 2011 10:21:36 +1000 spam')
+        self.assertEqual(r.fixed[1], 'spam')
+        r = parse.parse('{:tc} {}', 'Mon Nov 21 10:21:36 2011 spam')
+        self.assertEqual(r.fixed[1], 'spam')
+        r = parse.parse('{:tt} {}', '10:21 spam')
+        self.assertEqual(r.fixed[1], 'spam')
+
+    def test_mixed_types(self):
+        # stress-test: pull one of everything out of a string
+        r = parse.parse('''
+            letters: {:w}
+            non-letters: {:W}
+            whitespace: "{:s}"
+            non-whitespace: \t{:S}\n
+            digits: {:d} {:d}
+            non-digits: {:D}
+            numbers with thousands: {:n}
+            fixed-point: {:f}
+            floating-point: {:e}
+            general numbers: {:g} {:g}
+            binary: {:b}
+            octal: {:o}
+            hex: {:x}
+            ISO 8601 e.g. {:ti}
+            RFC2822 e.g. {:te}
+            Global e.g. {:tg}
+            US e.g. {:ta}
+            ctime() e.g. {:tc}
+            HTTP e.g. {:th}
+            time: {:tt}
+            final value: {}
+        ''',
+        '''
+            letters: abcdef_GHIJLK
+            non-letters: !@#%$ *^%
+            whitespace: "   \t\n"
+            non-whitespace: \tabc\n
+            digits: 12345 0b1011011
+            non-digits: abcdef
+            numbers with thousands: 1,000
+            fixed-point: 100.2345
+            floating-point: 1.1e-10
+            general numbers: 1 1.1
+            binary: 0b1000
+            octal: 0o1000
+            hex: 0x1000
+            ISO 8601 e.g. 1972-01-20T10:21:36Z
+            RFC2822 e.g. Mon, 20 Jan 1972 10:21:36 +1000
+            Global e.g. 20/1/1972 10:21:36 AM +1:00
+            US e.g. 1/20/1972 10:21:36 PM +10:30
+            ctime() e.g. Sun Sep 16 01:03:52 1973
+            HTTP e.g. 21/Nov/2011:00:07:11 +0000
+            time: 10:21:36 PM -5:30
+            final value: spam
+        ''')
+        self.assertNotEqual(r, None)
+        self.assertEqual(r.fixed[22], 'spam')
+
+    def test_mixed_type_variant(self):
+        r = parse.parse('''
+            letters: {:w}
+            non-letters: {:W}
+            whitespace: "{:s}"
+            non-whitespace: \t{:S}\n
+            digits: {:d}
+            non-digits: {:D}
+            numbers with thousands: {:n}
+            fixed-point: {:f}
+            floating-point: {:e}
+            general numbers: {:g} {:g}
+            binary: {:b}
+            octal: {:o}
+            hex: {:x}
+            ISO 8601 e.g. {:ti}
+            RFC2822 e.g. {:te}
+            Global e.g. {:tg}
+            US e.g. {:ta}
+            ctime() e.g. {:tc}
+            HTTP e.g. {:th}
+            time: {:tt}
+            final value: {}
+        ''',
+        '''
+            letters: abcdef_GHIJLK
+            non-letters: !@#%$ *^%
+            whitespace: "   \t\n"
+            non-whitespace: \tabc\n
+            digits: 0xabcdef
+            non-digits: abcdef
+            numbers with thousands: 1.000.000
+            fixed-point: 0.00001
+            floating-point: NAN
+            general numbers: 1.1e10 nan
+            binary: 0B1000
+            octal: 0O1000
+            hex: 0X1000
+            ISO 8601 e.g. 1972-01-20T10:21:36Z
+            RFC2822 e.g. Mon, 20 Jan 1972 10:21:36 +1000
+            Global e.g. 20/1/1972 10:21:36 AM +1:00
+            US e.g. 1/20/1972 10:21:36 PM +10:30
+            ctime() e.g. Sun Sep 16 01:03:52 1973
+            HTTP e.g. 21/Nov/2011:00:07:11 +0000
+            time: 10:21:36 PM -5:30
+            final value: spam
+        ''')
+        self.assertNotEqual(r, None)
+        self.assertEqual(r.fixed[21], 'spam')
+
+    def test_too_many_fields(self):
+        # Python 3.5 removed the limit of 100 named groups in a regular expression,
+        # so only test for the exception if the limit exists.
+        try:
+            re.compile("".join("(?P<n{n}>{n}-)".format(n=i) for i in range(101)))
+        except AssertionError:
+            p = parse.compile('{:ti}' * 15)
+            self.assertRaises(parse.TooManyFields, p.parse, '')
+
+    def test_letters(self):
+        res = parse.parse('{:l}', '')
+        self.assertIsNone(res)
+        res = parse.parse('{:l}', 'sPaM')
+        self.assertEqual(res.fixed, ('sPaM', ))
+        res = parse.parse('{:l}', 'sP4M')
+        self.assertIsNone(res)
+        res = parse.parse('{:l}', 'sP_M')
+        self.assertIsNone(res)
+
+
+class TestSearch(unittest.TestCase):
+    def test_basic(self):
+        # basic search() test
+        r = parse.search('a {} c', ' a b c ')
+        self.assertEqual(r.fixed, ('b',))
+
+    def test_multiline(self):
+        # multiline search() test
+        r = parse.search('age: {:d}\n', 'name: Rufus\nage: 42\ncolor: red\n')
+        self.assertEqual(r.fixed, (42,))
+
+    def test_pos(self):
+        # basic search() test
+        r = parse.search('a {} c', ' a b c ', 2)
+        self.assertEqual(r, None)
+
+    def test_no_evaluate_result(self):
+        match = parse.search('age: {:d}\n', 'name: Rufus\nage: 42\ncolor: red\n', evaluate_result=False)
+        r = match.evaluate_result()
+        self.assertEqual(r.fixed, (42,))
+
+
+class TestFindall(unittest.TestCase):
+    def test_findall(self):
+        # basic findall() test
+        s = ''.join(r.fixed[0] for r in parse.findall(">{}<",
+            "<p>some <b>bold</b> text</p>"))
+        self.assertEqual(s, "some bold text")
+
+    def test_no_evaluate_result(self):
+        # basic findall() test
+        s = ''.join(m.evaluate_result().fixed[0] for m in parse.findall(">{}<",
+            "<p>some <b>bold</b> text</p>", evaluate_result=False))
+        self.assertEqual(s, "some bold text")
+
+
+class TestBugs(unittest.TestCase):
+    def test_named_date_issue7(self):
+        r = parse.parse('on {date:ti}', 'on 2012-09-17')
+        self.assertEqual(r['date'], datetime(2012, 9, 17, 0, 0, 0))
+
+        # fix introduced regressions
+        r = parse.parse('a {:ti} b', 'a 1997-07-16T19:20 b')
+        self.assertEqual(r[0], datetime(1997, 7, 16, 19, 20, 0))
+        r = parse.parse('a {:ti} b', 'a 1997-07-16T19:20Z b')
+        utc = parse.FixedTzOffset(0, 'UTC')
+        self.assertEqual(r[0], datetime(1997, 7, 16, 19, 20, tzinfo=utc))
+        r = parse.parse('a {date:ti} b', 'a 1997-07-16T19:20Z b')
+        self.assertEqual(r['date'], datetime(1997, 7, 16, 19, 20, tzinfo=utc))
+
+    def test_dotted_type_conversion_pull_8(self):
+        # test pull request 8 which fixes type conversion related to dotted
+        # names being applied correctly
+        r = parse.parse('{a.b:d}', '1')
+        self.assertEqual(r['a.b'], 1)
+        r = parse.parse('{a_b:w} {a.b:d}', '1 2')
+        self.assertEqual(r['a_b'], '1')
+        self.assertEqual(r['a.b'], 2)
+
+    def test_pm_overflow_issue16(self):
+        r = parse.parse('Meet at {:tg}', 'Meet at 1/2/2011 12:45 PM')
+        self.assertEqual(r[0], datetime(2011, 2, 1, 12, 45))
+
+    def test_pm_handling_issue57(self):
+        r = parse.parse('Meet at {:tg}', 'Meet at 1/2/2011 12:15 PM')
+        self.assertEqual(r[0], datetime(2011, 2, 1, 12, 15))
+        r = parse.parse('Meet at {:tg}', 'Meet at 1/2/2011 12:15 AM')
+        self.assertEqual(r[0], datetime(2011, 2, 1, 0, 15))
+
+    def test_user_type_with_group_count_issue60(self):
+        @parse.with_pattern(r'((\w+))', regex_group_count=2)
+        def parse_word_and_covert_to_uppercase(text):
+            return text.strip().upper()
+
+        @parse.with_pattern(r'\d+')
+        def parse_number(text):
+            return int(text)
+
+        # -- CASE: Use named (OK)
+        type_map = dict(Name=parse_word_and_covert_to_uppercase,
+                        Number=parse_number)
+        r = parse.parse('Hello {name:Name} {number:Number}',
+                        'Hello Alice 42', extra_types=type_map)
+        self.assertEqual(r.named, dict(name='ALICE', number=42))
+
+        # -- CASE: Use unnamed/fixed (problematic)
+        r = parse.parse('Hello {:Name} {:Number}',
+                        'Hello Alice 42', extra_types=type_map)
+        self.assertEqual(r[0], 'ALICE')
+        self.assertEqual(r[1], 42)
+
+    def test_unmatched_brace_doesnt_match(self):
+        r = parse.parse("{who.txt", "hello")
+        self.assertIsNone(r)
+
+
+# -----------------------------------------------------------------------------
+# TEST SUPPORT FOR: TestParseType
+# -----------------------------------------------------------------------------
+class TestParseType(unittest.TestCase):
+
+    def assert_match(self, parser, text, param_name, expected):
+        result = parser.parse(text)
+        self.assertEqual(result[param_name], expected)
+
+    def assert_mismatch(self, parser, text, param_name):
+        result = parser.parse(text)
+        self.assertTrue(result is None)
+
+    def assert_fixed_match(self, parser, text, expected):
+        result = parser.parse(text)
+        self.assertEqual(result.fixed, expected)
+
+    def assert_fixed_mismatch(self, parser, text):
+        result = parser.parse(text)
+        self.assertEqual(result, None)
+
+    def test_pattern_should_be_used(self):
+        def parse_number(text):
+            return int(text)
+        parse_number.pattern = r"\d+"
+        parse_number.name = "Number"    # For testing only.
+
+        extra_types = {parse_number.name: parse_number}
+        format = "Value is {number:Number} and..."
+        parser = parse.Parser(format, extra_types)
+
+        self.assert_match(parser, "Value is 42 and...", "number", 42)
+        self.assert_match(parser, "Value is 00123 and...", "number", 123)
+        self.assert_mismatch(parser, "Value is ALICE and...", "number")
+        self.assert_mismatch(parser, "Value is -123 and...", "number")
+
+    def test_pattern_should_be_used2(self):
+        def parse_yesno(text):
+            return parse_yesno.mapping[text.lower()]
+        parse_yesno.mapping = {
+            "yes": True, "no": False,
+            "on": True, "off": False,
+            "true": True, "false": False,
+        }
+        parse_yesno.pattern = r"|".join(parse_yesno.mapping.keys())
+        parse_yesno.name = "YesNo"      # For testing only.
+
+        extra_types = {parse_yesno.name: parse_yesno}
+        format = "Answer: {answer:YesNo}"
+        parser = parse.Parser(format, extra_types)
+
+        # -- ENSURE: Known enum values are correctly extracted.
+        for value_name, value in parse_yesno.mapping.items():
+            text = "Answer: %s" % value_name
+            self.assert_match(parser, text, "answer", value)
+
+        # -- IGNORE-CASE: In parsing, calls type converter function !!!
+        self.assert_match(parser, "Answer: YES", "answer", True)
+        self.assert_mismatch(parser, "Answer: __YES__", "answer")
+
+    def test_with_pattern(self):
+        ab_vals = dict(a=1, b=2)
+
+        @parse.with_pattern(r'[ab]')
+        def ab(text):
+            return ab_vals[text]
+
+        parser = parse.Parser('test {result:ab}', {'ab': ab})
+        self.assert_match(parser, 'test a', 'result', 1)
+        self.assert_match(parser, 'test b', 'result', 2)
+        self.assert_mismatch(parser, "test c", "result")
+
+    def test_with_pattern_and_regex_group_count(self):
+        # -- SPECIAL-CASE: Regex-grouping is used in user-defined type
+        # NOTE: Missing or wrong regex_group_counts cause problems
+        #       with parsing following params.
+        @parse.with_pattern(r'(meter|kilometer)', regex_group_count=1)
+        def parse_unit(text):
+            return text.strip()
+
+        @parse.with_pattern(r'\d+')
+        def parse_number(text):
+            return int(text)
+
+        type_converters = dict(Number=parse_number, Unit=parse_unit)
+        # -- CASE: Unnamed-params (affected)
+        parser = parse.Parser('test {:Unit}-{:Number}', type_converters)
+        self.assert_fixed_match(parser, 'test meter-10', ('meter', 10))
+        self.assert_fixed_match(parser, 'test kilometer-20', ('kilometer', 20))
+        self.assert_fixed_mismatch(parser, 'test liter-30')
+
+        # -- CASE: Named-params (uncritical; should not be affected)
+        # REASON: Named-params have additional, own grouping.
+        parser2 = parse.Parser('test {unit:Unit}-{value:Number}', type_converters)
+        self.assert_match(parser2, 'test meter-10', 'unit', 'meter')
+        self.assert_match(parser2, 'test meter-10', 'value', 10)
+        self.assert_match(parser2, 'test kilometer-20', 'unit', 'kilometer')
+        self.assert_match(parser2, 'test kilometer-20', 'value', 20)
+        self.assert_mismatch(parser2, 'test liter-30', 'unit')
+
+    def test_with_pattern_and_wrong_regex_group_count_raises_error(self):
+        # -- SPECIAL-CASE:
+        # Regex-grouping is used in user-defined type, but wrong value is provided.
+        @parse.with_pattern(r'(meter|kilometer)', regex_group_count=1)
+        def parse_unit(text):
+            return text.strip()
+
+        @parse.with_pattern(r'\d+')
+        def parse_number(text):
+            return int(text)
+
+        # -- CASE: Unnamed-params (affected)
+        BAD_REGEX_GROUP_COUNTS_AND_ERRORS = [
+            (None, ValueError),
+            (0, ValueError),
+            (2, IndexError),
+        ]
+        for bad_regex_group_count, error_class in BAD_REGEX_GROUP_COUNTS_AND_ERRORS:
+            parse_unit.regex_group_count = bad_regex_group_count    # -- OVERRIDE-HERE
+            type_converters = dict(Number=parse_number, Unit=parse_unit)
+            parser = parse.Parser('test {:Unit}-{:Number}', type_converters)
+            self.assertRaises(error_class, parser.parse, 'test meter-10')
+
+    def test_with_pattern_and_regex_group_count_is_none(self):
+        # -- CORNER-CASE: Increase code-coverage.
+        data_values = dict(a=1, b=2)
+
+        @parse.with_pattern(r'[ab]')
+        def parse_data(text):
+            return data_values[text]
+        parse_data.regex_group_count = None     # ENFORCE: None
+
+        # -- CASE: Unnamed-params
+        parser = parse.Parser('test {:Data}', {'Data': parse_data})
+        self.assert_fixed_match(parser, 'test a', (1,))
+        self.assert_fixed_match(parser, 'test b', (2,))
+        self.assert_fixed_mismatch(parser, 'test c')
+
+        # -- CASE: Named-params
+        parser2 = parse.Parser('test {value:Data}', {'Data': parse_data})
+        self.assert_match(parser2, 'test a', 'value', 1)
+        self.assert_match(parser2, 'test b', 'value', 2)
+        self.assert_mismatch(parser2, 'test c', 'value')
+
+    def test_case_sensitivity(self):
+        r = parse.parse('SPAM {} SPAM', 'spam spam spam')
+        self.assertEqual(r[0], 'spam')
+        self.assertEqual(parse.parse('SPAM {} SPAM', 'spam spam spam', case_sensitive=True), None)
+
+    def test_decimal_value(self):
+        value = Decimal('5.5')
+        str_ = 'test {}'.format(value)
+        parser = parse.Parser('test {:F}')
+        self.assertEqual(parser.parse(str_)[0], value)
+
+    def test_width_str(self):
+        res = parse.parse('{:.2}{:.2}', 'look')
+        self.assertEqual(res.fixed, ('lo', 'ok'))
+        res = parse.parse('{:2}{:2}', 'look')
+        self.assertEqual(res.fixed, ('lo', 'ok'))
+        res = parse.parse('{:4}{}', 'look at that')
+        self.assertEqual(res.fixed, ('look', ' at that'))
+
+    def test_width_constraints(self):
+        res = parse.parse('{:4}', 'looky')
+        self.assertEqual(res.fixed, ('looky', ))
+        res = parse.parse('{:4.4}', 'looky')
+        self.assertIsNone(res)
+        res = parse.parse('{:4.4}', 'ook')
+        self.assertIsNone(res)
+        res = parse.parse('{:4}{:.4}', 'look at that')
+        self.assertEqual(res.fixed, ('look at ', 'that'))
+
+    def test_width_multi_int(self):
+        res = parse.parse('{:02d}{:02d}', '0440')
+        self.assertEqual(res.fixed, (4, 40))
+        res = parse.parse('{:03d}{:d}', '04404')
+        self.assertEqual(res.fixed, (44, 4))
+
+    def test_width_empty_input(self):
+        res = parse.parse('{:.2}', '')
+        self.assertIsNone(res)
+        res = parse.parse('{:2}', 'l')
+        self.assertIsNone(res)
+        res = parse.parse('{:2d}', '')
+        self.assertIsNone(res)
+
+
+if __name__ == '__main__':
+    unittest.main()
+
+
+# Copyright (c) 2011 eKit.com Inc (http://www.ekit.com/)
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+#  The above copyright notice and this permission notice shall be included in
+#  all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# vim: set filetype=python ts=4 sw=4 et si tw=75
diff --git a/tests/test_parse_decorator.py b/tests/test_parse_decorator.py
new file mode 100755
index 0000000..bb1972a
--- /dev/null
+++ b/tests/test_parse_decorator.py
@@ -0,0 +1,147 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# pylint: disable=invalid-name, missing-docstring, too-few-public-methods
+"""
+Integrated into :mod:`parse` module.
+"""
+
+from __future__ import absolute_import
+import unittest
+import parse
+from parse_type import build_type_dict
+from .parse_type_test import ParseTypeTestCase
+
+
+# -----------------------------------------------------------------------------
+# TEST CASE: TestParseTypeWithPatternDecorator
+# -----------------------------------------------------------------------------
+class TestParseTypeWithPatternDecorator(ParseTypeTestCase):
+    r"""
+    Test the pattern decorator for type-converter (parse_type) functions.
+
+        >>> def parse_number(text):
+        ...     return int(text)
+        >>> parse_number.pattern = r"\d+"
+
+    is equivalent to:
+
+        >>> import parse
+        >>> @parse.with_pattern(r"\d+")
+        ... def parse_number(text):
+        ...     return int(text)
+
+        >>> assert hasattr(parse_number, "pattern")
+        >>> assert parse_number.pattern == r"\d+"
+    """
+
+    def assert_decorated_with_pattern(self, func, expected_pattern):
+        self.assertTrue(callable(func))
+        self.assertTrue(hasattr(func, "pattern"))
+        self.assertEqual(func.pattern, expected_pattern)
+
+    def assert_converter_call(self, func, text, expected_value):
+        value = func(text)
+        self.assertEqual(value, expected_value)
+
+    # -- TESTS:
+    def test_function_with_pattern_decorator(self):
+        @parse.with_pattern(r"\d+")
+        def parse_number(text):
+            return int(text)
+
+        self.assert_decorated_with_pattern(parse_number, r"\d+")
+        self.assert_converter_call(parse_number, "123", 123)
+
+    def test_classmethod_with_pattern_decorator(self):
+        choice_pattern = r"Alice|Bob|Charly"
+        class C(object):
+            @classmethod
+            @parse.with_pattern(choice_pattern)
+            def parse_choice(cls, text):
+                return text
+
+        self.assert_decorated_with_pattern(C.parse_choice, choice_pattern)
+        self.assert_converter_call(C.parse_choice, "Alice", "Alice")
+
+    def test_staticmethod_with_pattern_decorator(self):
+        choice_pattern = r"Alice|Bob|Charly"
+        class S(object):
+            @staticmethod
+            @parse.with_pattern(choice_pattern)
+            def parse_choice(text):
+                return text
+
+        self.assert_decorated_with_pattern(S.parse_choice, choice_pattern)
+        self.assert_converter_call(S.parse_choice, "Bob", "Bob")
+
+    def test_decorated_function_with_parser(self):
+        # -- SETUP:
+        @parse.with_pattern(r"\d+")
+        def parse_number(text):
+            return int(text)
+
+        parse_number.name = "Number" #< For test automation.
+        more_types = build_type_dict([parse_number])
+        schema = "Test: {number:Number}"
+        parser = parse.Parser(schema, more_types)
+
+        # -- PERFORM TESTS:
+        # pylint: disable=bad-whitespace
+        self.assert_match(parser, "Test: 1",   "number", 1)
+        self.assert_match(parser, "Test: 42",  "number", 42)
+        self.assert_match(parser, "Test: 123", "number", 123)
+
+        # -- PARSE MISMATCH:
+        self.assert_mismatch(parser, "Test: x",    "number")  # Not a Number.
+        self.assert_mismatch(parser, "Test: -1",   "number")  # Negative.
+        self.assert_mismatch(parser, "Test: a, b", "number")  # List of ...
+
+    def test_decorated_classmethod_with_parser(self):
+        # -- SETUP:
+        class C(object):
+            @classmethod
+            @parse.with_pattern(r"Alice|Bob|Charly")
+            def parse_person(cls, text):
+                return text
+
+        more_types = {"Person": C.parse_person}
+        schema = "Test: {person:Person}"
+        parser = parse.Parser(schema, more_types)
+
+        # -- PERFORM TESTS:
+        # pylint: disable=bad-whitespace
+        self.assert_match(parser, "Test: Alice", "person", "Alice")
+        self.assert_match(parser, "Test: Bob",   "person", "Bob")
+
+        # -- PARSE MISMATCH:
+        self.assert_mismatch(parser, "Test: ", "person")        # Missing.
+        self.assert_mismatch(parser, "Test: BAlice", "person")  # Similar1.
+        self.assert_mismatch(parser, "Test: Boby", "person")    # Similar2.
+        self.assert_mismatch(parser, "Test: a",    "person")    # INVALID ...
+
+# -----------------------------------------------------------------------------
+# MAIN:
+# -----------------------------------------------------------------------------
+if __name__ == '__main__':
+    unittest.main()
+
+
+# Copyright (c) 2012-2013 by Jens Engel (https://github.com/jenisys/parse_type)
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+#  The above copyright notice and this permission notice shall be included in
+#  all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
diff --git a/tests/test_parse_util.py b/tests/test_parse_util.py
new file mode 100644
index 0000000..5642277
--- /dev/null
+++ b/tests/test_parse_util.py
@@ -0,0 +1,415 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+Test suite to test the :mod:`parse_type.parse_util` module.
+"""
+
+from __future__ import absolute_import, print_function
+from .parse_type_test import TestCase, unittest
+from parse_type.parse_util \
+    import Field, FieldParser, FormatSpec, make_format_spec
+
+
+# -----------------------------------------------------------------------------
+# TEST CASE:
+# -----------------------------------------------------------------------------
+class TestField(TestCase):
+    EMPTY_FORMAT_FIELDS = [
+        Field(),               #< Empty field.
+        Field("name"),         #< Named field without format.
+        Field("name", ""),     #< Named field with format=empty-string.
+        Field(format=""),      #< Field with format=empty-string.
+    ]
+    NONEMPTY_FORMAT_FIELDS = [
+        Field(format="Number"),    #< Typed field without name".
+        Field("name", "Number"),   #< Named and typed field".
+    ]
+    INVALID_FORMAT_FIELDS = [
+        Field(format="<"),     #< Align without type.
+        Field(format="_<"),    #< Fill and align without type.
+        Field(format="_<10"),  #< Fill, align and width without type.
+        Field(format="_<098"), #< Fill, align, zero and width without type.
+    ]
+    FIELDS = EMPTY_FORMAT_FIELDS + NONEMPTY_FORMAT_FIELDS + INVALID_FORMAT_FIELDS
+
+    def test_is_typed__returns_true_for_nonempty_format(self):
+        fields = self.NONEMPTY_FORMAT_FIELDS + self.INVALID_FORMAT_FIELDS
+        for field in fields:
+            self.assertTrue(field.has_format, "Field: %s" % field)
+
+    def test_is_typed__returns_false_for_empty_format(self):
+        fields = self.EMPTY_FORMAT_FIELDS
+        for field in fields:
+            self.assertFalse(field.has_format, "Field: %s" % field)
+
+    def test_format_spec__returns_none_if_format_is_empty(self):
+        for field in self.EMPTY_FORMAT_FIELDS:
+            self.assertIsNone(field.format_spec, "Field: %s" % field)
+
+    def test_format_spec__if_format_is_nonempty_and_valid(self):
+        for field in self.NONEMPTY_FORMAT_FIELDS:
+            self.assertIsNotNone(field.format_spec)
+            self.assertIsInstance(field.format_spec, FormatSpec)
+
+    def test_format_spec__raises_error_if_nonempty_format_is_invalid(self):
+        for field in self.INVALID_FORMAT_FIELDS:
+            with self.assertRaises(ValueError):
+                field.format_spec
+
+    def test_format_spec__is_lazy_evaluated(self):
+        fields = [Field(), Field("name"),
+                  Field("name", "type"), Field(format="type")]
+        for field in fields:
+            self.assertIsNone(field._format_spec)
+            if field.format:
+                _ = field.format_spec.type
+                self.assertIsNotNone(field.format_spec)
+            else:
+                self.assertIsNone(field.format_spec)
+
+    def test_set_format_invalidates_format_spec(self):
+        field = Field(format="Number")
+        self.assertEqual(field.format, "Number")
+        self.assertEqual(field.format_spec.type, "Number")
+        self.assertEqual(field.format_spec.align, None)
+
+        field.set_format("<ManyNumbers")
+        self.assertEqual(field.format, "<ManyNumbers")
+        self.assertEqual(field.format_spec.type, "ManyNumbers")
+        self.assertEqual(field.format_spec.align, '<')
+
+    def test_to_string_conversion(self):
+        test_data = [
+            (Field(),               "{}"),
+            (Field("name"),         "{name}"),
+            (Field(format="type"),  "{:type}"),
+            (Field("name", "type"), "{name:type}"),
+        ]
+        for field, expected_text in test_data:
+            text = str(field)
+            self.assertEqual(text, expected_text)
+
+    def test_equal__with_field(self):
+        for field  in self.FIELDS:
+            other = field
+            self.assertEqual(field, other)
+
+    def test_equal__with_string(self):
+        for field  in self.FIELDS:
+            other = str(field)
+            self.assertEqual(field, other)
+
+    def test_equal__with_unsupported(self):
+        UNSUPPORTED_TYPES =  [None, make_format_spec(), True, False, 10]
+        field = Field()
+        for other in UNSUPPORTED_TYPES:
+            with self.assertRaises(ValueError):
+                field == other
+
+    def test_not_equal__with_field(self):
+        for field  in self.FIELDS:
+            other2 = Field(field.name, "XXX")
+            self.assertNotEqual(field.format, other2.format)
+            self.assertNotEqual(field, other2)
+
+            other3 = Field("xxx", field.format)
+            self.assertNotEqual(field.name, other3.name)
+            self.assertNotEqual(field, other3)
+
+    def test_not_equal__with_string(self):
+        for field  in self.FIELDS:
+            other2 = Field(field.name, "XXX")
+            other2_text = str(other2)
+            self.assertNotEqual(field.format, other2.format)
+            self.assertNotEqual(field, other2_text)
+
+            other3 = Field("xxx", field.format)
+            other3_text = str(other3)
+            self.assertNotEqual(field.name, other3.name)
+            self.assertNotEqual(field, other3_text)
+
+    def test_not_equal__with_unsupported(self):
+        UNSUPPORTED_TYPES =  [None, make_format_spec(), True, False, 10]
+        field = Field()
+        for other in UNSUPPORTED_TYPES:
+            with self.assertRaises(ValueError):
+                field != other
+
+
+# -----------------------------------------------------------------------------
+# TEST CASE:
+# -----------------------------------------------------------------------------
+class TestFieldFormatSpec(TestCase):
+    """
+    Test Field.extract_format_spec().
+
+    FORMAT-SPEC SCHEMA:
+    [[fill]align][0][width][.precision][type]
+    """
+
+    def assertValidFormatWidth(self, width):
+        self.assertIsInstance(width, str)
+        for char in width:
+            self.assertTrue(char.isdigit())
+
+    def assertValidFormatAlign(self, align):
+        self.assertIsInstance(align, str)
+        self.assertEqual(len(align), 1)
+        self.assertIn(align, Field.ALIGN_CHARS)
+
+    def assertValidFormatPrecision(self, precision):
+        self.assertIsInstance(precision, str)
+        for char in precision:
+            self.assertTrue(char.isdigit())
+
+    def test_extract_format_spec__with_empty_string_raises_error(self):
+        with self.assertRaises(ValueError) as cm:
+            Field.extract_format_spec("")
+        self.assertIn("INVALID-FORMAT", str(cm.exception))
+
+    def test_extract_format_spec__with_type(self):
+        format_types = ["d", "w", "Number", "Number?", "Number*", "Number+"]
+        for format_type in format_types:
+            format_spec = Field.extract_format_spec(format_type)
+            expected_spec = make_format_spec(format_type)
+            self.assertEqual(format_spec.type, format_type)
+            self.assertEqual(format_spec.width, "")
+            self.assertEqual(format_spec.zero, False)
+            self.assertIsNone(format_spec.align)
+            self.assertIsNone(format_spec.fill)
+            self.assertEqual(format_spec, expected_spec)
+
+    def test_extract_format_spec_with_width_only_raises_error(self):
+        # -- INVALID-FORMAT: Width without type.
+        with self.assertRaises(ValueError) as cm:
+            Field.extract_format_spec("123")
+        self.assertEqual("INVALID-FORMAT: 123 (without type)", str(cm.exception))
+
+    def test_extract_format_spec__with_zero_only_raises_error(self):
+        # -- INVALID-FORMAT: Zero without type.
+        with self.assertRaises(ValueError) as cm:
+            Field.extract_format_spec("0")
+        self.assertEqual("INVALID-FORMAT: 0 (without type)", str(cm.exception))
+
+    def test_extract_format_spec__with_align_only_raises_error(self):
+        # -- INVALID-FORMAT: Align without type.
+        for align in Field.ALIGN_CHARS:
+            with self.assertRaises(ValueError) as cm:
+                Field.extract_format_spec(align)
+            self.assertEqual("INVALID-FORMAT: %s (without type)" % align,
+                             str(cm.exception))
+
+    def test_extract_format_spec_with_fill_and_align_only_raises_error(self):
+        # -- INVALID-FORMAT: Fill and align without type.
+        fill = "_"
+        for align in Field.ALIGN_CHARS:
+            with self.assertRaises(ValueError) as cm:
+                format = fill + align
+                Field.extract_format_spec(format)
+            self.assertEqual("INVALID-FORMAT: %s (without type)" % format,
+                             str(cm.exception))
+
+    def test_extract_format_spec__with_width_and_type(self):
+        formats = ["1s", "2d", "6s", "10d", "60f", "123456789s"]
+        for format in formats:
+            format_spec = Field.extract_format_spec(format)
+            expected_type  = format[-1]
+            expected_width = format[:-1]
+            expected_spec = make_format_spec(type=expected_type,
+                                             width=expected_width)
+            self.assertEqual(format_spec, expected_spec)
+            self.assertValidFormatWidth(format_spec.width)
+
+    def test_extract_format_spec__with_precision_and_type(self):
+        formats = [".2d", ".6s", ".6f"]
+        for format in formats:
+            format_spec = Field.extract_format_spec(format)
+            expected_type  = format[-1]
+            expected_precision = format[1:-1]
+            expected_spec = make_format_spec(type=expected_type,
+                                             precision=expected_precision)
+            self.assertEqual(format_spec, expected_spec)
+            self.assertValidFormatPrecision(format_spec.precision)
+
+    def test_extract_format_spec__with_zero_and_type(self):
+        formats = ["0s", "0d", "0Number", "0Number+"]
+        for format in formats:
+            format_spec = Field.extract_format_spec(format)
+            expected_type  = format[1:]
+            expected_spec = make_format_spec(type=expected_type, zero=True)
+            self.assertEqual(format_spec, expected_spec)
+
+    def test_extract_format_spec__with_align_and_type(self):
+        # -- ALIGN_CHARS = "<>=^"
+        formats = ["<s", ">d", "=Number", "^Number+"]
+        for format in formats:
+            format_spec = Field.extract_format_spec(format)
+            expected_align = format[0]
+            expected_type  = format[1:]
+            expected_spec = make_format_spec(type=expected_type,
+                                             align=expected_align)
+            self.assertEqual(format_spec, expected_spec)
+            self.assertValidFormatAlign(format_spec.align)
+
+    def test_extract_format_spec__with_fill_align_and_type(self):
+        # -- ALIGN_CHARS = "<>=^"
+        formats = ["X<s", "_>d", "0=Number", " ^Number+"]
+        for format in formats:
+            format_spec = Field.extract_format_spec(format)
+            expected_fill  = format[0]
+            expected_align = format[1]
+            expected_type  = format[2:]
+            expected_spec = make_format_spec(type=expected_type,
+                                    align=expected_align, fill=expected_fill)
+            self.assertEqual(format_spec, expected_spec)
+            self.assertValidFormatAlign(format_spec.align)
+
+    # -- ALIGN_CHARS = "<>=^"
+    FORMAT_AND_FORMAT_SPEC_DATA = [
+            ("^010Number+", make_format_spec(type="Number+", width="10",
+                                        zero=True, align="^", fill=None)),
+            ("X<010Number+", make_format_spec(type="Number+", width="10",
+                                        zero=True, align="<", fill="X")),
+            ("_>0098Number?", make_format_spec(type="Number?", width="098",
+                                        zero=True, align=">", fill="_")),
+            ("*=129Number*", make_format_spec(type="Number*", width="129",
+                                        zero=False, align="=", fill="*")),
+            ("X129Number?", make_format_spec(type="X129Number?", width="",
+                                        zero=False, align=None, fill=None)),
+            (".3Number", make_format_spec(type="Number", width="",
+                                           zero=False, align=None, fill=None,
+                                           precision="3")),
+            ("6.2Number", make_format_spec(type="Number", width="6",
+                                      zero=False, align=None, fill=None,
+                                      precision="2")),
+    ]
+
+    def test_extract_format_spec__with_all(self):
+        for format, expected_spec in self.FORMAT_AND_FORMAT_SPEC_DATA:
+            format_spec = Field.extract_format_spec(format)
+            self.assertEqual(format_spec, expected_spec)
+            self.assertValidFormatWidth(format_spec.width)
+            if format_spec.align is not None:
+                self.assertValidFormatAlign(format_spec.align)
+
+    def test_make_format(self):
+        for expected_format, format_spec in self.FORMAT_AND_FORMAT_SPEC_DATA:
+            format = Field.make_format(format_spec)
+            self.assertEqual(format, expected_format)
+            format_spec2 = Field.extract_format_spec(format)
+            self.assertEqual(format_spec2, format_spec)
+
+
+# -----------------------------------------------------------------------------
+# TEST CASE:
+# -----------------------------------------------------------------------------
+class TestFieldParser(TestCase):
+    INVALID_FIELDS = ["", "{", "}", "xxx", "name:type", ":type"]
+    VALID_FIELD_DATA = [
+        ("{}",          Field()),
+        ("{name}",      Field("name")),
+        ("{:type}",     Field(format="type")),
+        ("{name:type}", Field("name", "type"))
+    ]
+
+    #def assertFieldEqual(self, actual, expected):
+    #    message = "FAILED: %s == %s" %  (actual, expected)
+    #    self.assertIsInstance(actual, Field)
+    #    self.assertIsInstance(expected, Field)
+    #    self.assertEqual(actual, expected, message)
+    #    # self.assertEqual(actual.name,   expected.name, message)
+    #    # self.assertEqual(actual.format, expected.format, message)
+
+    def test_parse__raises_error_with_missing_or_partial_braces(self):
+        for field_text in self.INVALID_FIELDS:
+            with self.assertRaises(ValueError):
+                FieldParser.parse(field_text)
+
+    def test_parse__with_valid_fields(self):
+        for field_text, expected_field in self.VALID_FIELD_DATA:
+            field = FieldParser.parse(field_text)
+            self.assertEqual(field, expected_field)
+
+    def test_extract_fields__without_field(self):
+        prefix = "XXX ___"
+        suffix = "XXX {{escaped_field}} {{escaped_field:xxx_type}} XXX"
+        field_texts = [prefix, suffix, prefix + suffix, suffix + prefix]
+
+        for field_text in field_texts:
+            fields = list(FieldParser.extract_fields(field_text))
+            self.assertEqual(len(fields), 0)
+
+    def test_extract_fields__with_one_field(self):
+        prefix = "XXX ___"
+        suffix = "XXX {{escaped_field}} {{escaped_field:xxx_type}} XXX"
+
+        for field_text, expected_field in self.VALID_FIELD_DATA:
+            fields = list(FieldParser.extract_fields(field_text))
+            self.assertEqual(len(fields), 1)
+            self.assertSequenceEqual(fields, [expected_field])
+
+            field_text2 = prefix + field_text + suffix
+            fields2 = list(FieldParser.extract_fields(field_text2))
+            self.assertEqual(len(fields2), 1)
+            self.assertSequenceEqual(fields, fields2)
+
+    def test_extract_fields__with_many_fields(self):
+        MANY_FIELDS_DATA = [
+            ("{}xxx{name2}",     [Field(), Field("name2")]),
+            ("{name1}yyy{:type2}", [Field("name1"), Field(format="type2")]),
+            ("{:type1}xxx{name2}{name3:type3}",
+            [Field(format="type1"), Field("name2"), Field("name3", "type3")]),
+        ]
+        prefix = "XXX ___"
+        suffix = "XXX {{escaped_field}} {{escaped_field:xxx_type}} XXX"
+
+        for field_text, expected_fields in MANY_FIELDS_DATA:
+            fields = list(FieldParser.extract_fields(field_text))
+            self.assertEqual(len(fields), len(expected_fields))
+            self.assertSequenceEqual(fields, expected_fields)
+
+            field_text2 = prefix + field_text + suffix
+            fields2 = list(FieldParser.extract_fields(field_text2))
+            self.assertEqual(len(fields2), len(expected_fields))
+            self.assertSequenceEqual(fields2, expected_fields)
+
+
+    def test_extract_types(self):
+        MANY_TYPES_DATA = [
+            ("{}xxx{name2}",                    []),
+            ("{name1}yyy{:type2}",              ["type2"]),
+            ("{:type1}xxx{name2}{name3:type3}", ["type1", "type3"]),
+        ]
+
+        for field_text, expected_types in MANY_TYPES_DATA:
+            type_names = list(FieldParser.extract_types(field_text))
+            self.assertEqual(len(type_names), len(expected_types))
+            self.assertSequenceEqual(type_names, expected_types)
+
+
+# -----------------------------------------------------------------------------
+# MAIN:
+# -----------------------------------------------------------------------------
+if __name__ == '__main__':
+    unittest.main()
+
+
+# Copyright (c) 2012-2013 by Jens Engel (https://github.com/jenisys/parse_type)
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+#  The above copyright notice and this permission notice shall be included in
+#  all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..e284c4a
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,86 @@
+# ============================================================================
+# TOX CONFIGURATION: parse_type
+# ============================================================================
+# DESCRIPTION:
+#
+#   Use tox to run tasks (tests, ...) in a clean virtual environment.
+#   Tox is configured by default for online usage.
+#
+#   Run tox, like:
+#
+#       tox -e py27
+#       tox -e py37
+#
+# SEE ALSO:
+#   * https://tox.readthedocs.io/en/latest/config.html
+# ============================================================================
+# -- ONLINE USAGE:
+# PIP_INDEX_URL = https://pypi.org/simple
+
+[tox]
+minversion = 3.10.0
+envlist    = py27, py37, py38, pypy, pypy3, doctest
+skip_missing_interpreters = True
+sitepackages = False
+indexserver =
+    default = https://pypi.org/simple
+
+
+# -----------------------------------------------------------------------------
+# TEST ENVIRONMENTS:
+# -----------------------------------------------------------------------------
+# install_command = pip install -U {opts} {packages}
+[testenv]
+changedir = {toxinidir}
+commands =
+    pytest {posargs:tests}
+deps =
+    pytest <  5.0; python_version <  '3.0'  # >= 4.2
+    pytest >= 5.0; python_version >= '3.0'
+    pytest-html >= 1.19.0
+setenv =
+    TOXRUN = yes
+    PYSETUP_BOOTSTRAP = no
+
+
+[testenv:doctest]
+commands =
+    pytest --doctest-modules -v parse_type
+
+
+# -----------------------------------------------------------------------------
+# MORE TEST ENVIRONMENTS:
+# -----------------------------------------------------------------------------
+[testenv:coverage]
+commands =
+    pytest --cov=parse_type {posargs:tests}
+    coverage combine
+    coverage html
+    coverage xml
+deps =
+    {[testenv]deps}
+    pytest-cov
+    coverage>=4.0
+
+[testenv:install]
+changedir = {envdir}
+commands =
+    python ../../setup.py install -q
+    {toxinidir}/bin/toxcmd.py copytree ../../tests .
+    pytest {posargs:tests}
+deps =
+    pytest>=3.2
+
+
+# -----------------------------------------------------------------------------
+# SELDOM USED TEST ENVIRONMENTS:
+# -----------------------------------------------------------------------------
+# -- ENSURE: README.rst is well-formed.
+# python setup.py --long-description | rst2html.py >output.html
+; [testenv:check_setup]
+; changedir = {toxinidir}
+; commands=
+;     python setup.py --long-description > output.tmp
+;     rst2html.py output.tmp output.html
+; deps =
+;     docutils